/*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800)
#error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher."
#error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version."
#endif
#ifdef _OPENMP
#include <omp.h>
#define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */
#else
#define LIQ_TEMP_ROW_WIDTH(img_width) (img_width)
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#include "nearest.h"
#include "blur.h"
#include "kmeans.h"
#define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */
// each structure has a pointer as a unique identifier that allows type checking at run time
static const char liq_attr_magic[] = "liq_attr";
static const char liq_image_magic[] = "liq_image";
static const char liq_result_magic[] = "liq_result";
static const char liq_histogram_magic[] = "liq_histogram";
static const char liq_remapping_result_magic[] = "liq_remapping_result";
static const char liq_freed_magic[] = "free";
#define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic)
#define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr)
// Quantization settings handle created by liq_attr_create*().
// magic_header must stay the first member: it is the run-time type tag
// checked by CHECK_STRUCT_TYPE / liq_crash_if_invalid_handle_pointer_given.
struct liq_attr {
    const char *magic_header;                 // points at liq_attr_magic while alive, liq_freed_magic after destroy
    void* (*malloc)(size_t);                  // allocator pair used for this handle and objects derived from it
    void (*free)(void*);
    double target_mse, max_mse, kmeans_iteration_limit;  // quality targets derived from liq_set_quality()/liq_set_speed()
    float min_opaque_val;                     // 1.0 = no IE6 opacity workaround (see liq_set_min_opacity)
    unsigned int max_colors, max_histogram_entries;
    unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */;
    unsigned int kmeans_iterations, feedback_loop_trials;
    bool last_index_transparent, use_contrast_maps;
    unsigned char use_dither_map;             // 0=off, 1=on, 2=always (set by liq_set_speed)
    unsigned char speed;
    unsigned char progress_stage1, progress_stage2, progress_stage3;  // percentage budget per phase; sums to 100
    liq_progress_callback_function *progress_callback;
    void *progress_callback_user_info;
    liq_log_callback_function *log_callback;
    void *log_callback_user_info;
    liq_log_flush_callback_function *log_flush_callback;
    void *log_flush_callback_user_info;
};
// Input image handle. Pixels come either from caller-owned row pointers
// (rows != NULL) or from a row callback; converted float pixels may be
// cached in f_pixels or produced per-row into temp_f_row in low-memory mode.
struct liq_image {
    const char *magic_header;        // liq_image_magic while alive (run-time type tag)
    void* (*malloc)(size_t);
    void (*free)(void*);
    f_pixel *f_pixels;               // optional cache of the whole image converted to linear float; NULL in low-memory mode
    rgba_pixel **rows;               // caller-supplied row pointers, or NULL when using row_callback
    double gamma;
    unsigned int width, height;
    unsigned char *importance_map, *edges, *dither_map;  // optional per-pixel analysis maps
    rgba_pixel *pixels, *temp_row;   // pixels: bitmap owned via LIQ_OWN_PIXELS; temp_row: per-thread staging rows
    f_pixel *temp_f_row;             // per-thread float staging rows for low-memory conversion
    liq_image_get_rgba_row_callback *row_callback;
    void *row_callback_user_info;
    liq_image *background;           // optional background image for transparent-frame remapping; owned by this image
    float min_opaque_val;
    f_pixel fixed_colors[256];       // colors forced into the palette via liq_image_add_fixed_color
    unsigned short fixed_colors_count;
    bool free_pixels, free_rows, free_rows_internal;  // ownership flags set by liq_image_set_memory_ownership/create_rgba
};
// Per-remap state cloned from a liq_result so one quantization result can be
// remapped multiple times (possibly with different dither settings).
typedef struct liq_remapping_result {
    const char *magic_header;        // liq_remapping_result_magic while alive
    void* (*malloc)(size_t);
    void (*free)(void*);
    unsigned char *pixels;           // remapped index buffer, owned here
    colormap *palette;               // private copy of the result's colormap
    liq_progress_callback_function *progress_callback;
    void *progress_callback_user_info;
    liq_palette int_palette;         // 8-bit palette exposed to the API user
    double gamma, palette_error;
    float dither_level;
    unsigned char use_dither_map;
    unsigned char progress_stage1;   // percentage budget for the dither-map phase
} liq_remapping_result;
// Quantization result: the chosen palette plus remapping settings.
struct liq_result {
    const char *magic_header;        // liq_result_magic while alive (run-time type tag)
    void* (*malloc)(size_t);
    void (*free)(void*);
    liq_remapping_result *remapping; // lazily-created remap state; invalidated when settings change
    colormap *palette;
    liq_progress_callback_function *progress_callback;
    void *progress_callback_user_info;
    liq_palette int_palette;         // 8-bit palette exposed to the API user
    float dither_level;
    double gamma, palette_error;     // palette_error < 0 means "not measured"
    int min_posterization_output;
    unsigned char use_dither_map;
};
// Accumulates colors from one or more images before quantization.
struct liq_histogram {
    const char *magic_header;        // liq_histogram_magic while alive (run-time type tag)
    void* (*malloc)(size_t);
    void (*free)(void*);
    struct acolorhash_table *acht;   // hash of seen colors (pam.h); freed in liq_histogram_destroy
    double gamma;
    f_pixel fixed_colors[256];       // colors forced into the final palette
    unsigned short fixed_colors_count;
    unsigned short ignorebits;       // posterization: low bits ignored when bucketing colors
    bool had_image_added;
};
// Forward declarations for internal helpers defined later in this file.
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL;
static void contrast_maps(liq_image *image) LIQ_NONNULL;
static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL;
static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL;
static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL;
static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL;
static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL;
// printf-style logging routed through the user-supplied log callback.
// No-op when no callback is registered. The message is formatted twice:
// once to measure, once into a stack buffer of exactly the right size.
LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...)
{
    if (context->log_callback) {
        va_list va;
        va_start(va, fmt);
        int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
        va_end(va);
        if (required_space <= 0) {
            // vsnprintf returns a negative value on an encoding error;
            // without this guard the array below would have size <= 0 (UB).
            return;
        }
        LIQ_ARRAY(char, buf, required_space);
        va_start(va, fmt);
        vsnprintf(buf, required_space, fmt, va);
        va_end(va);
        context->log_callback(context, buf, context->log_callback_user_info);
    }
}
LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg)
{
if (attr->log_callback) {
attr->log_callback(attr, msg, attr->log_callback_user_info);
}
}
// Notifies the user that buffered log output should be flushed now.
LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr)
{
    liq_log_flush_callback_function *const flush_cb = attr->log_flush_callback;
    if (flush_cb) {
        flush_cb(attr, attr->log_flush_callback_user_info);
    }
}
LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent)
{
return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info);
}
LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent)
{
return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info);
}
#if USE_SSE
// Run-time check whether the CPU supports SSE; only compiled for SSE builds.
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64))
    // All 64-bit x86 CPUs are guaranteed to have SSE.
    return true;
#elif _MSC_VER
    int info[4];
    __cpuid(info, 1);
    /* bool is implemented as a built-in type of size 1 in MSVC */
    return info[3] & (1<<26) ? true : false;  // edx bit 26 = SSE2
#else
    int a,b,c,d;
    cpuid(1, a, b, c, d);
    return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif
/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
    // NULL is merely "not a valid handle" — report it without crashing.
    if (NULL == user_supplied_pointer) {
        return false;
    }
    const char *const header = user_supplied_pointer->magic_header;
    if (header == liq_freed_magic) {
        fprintf(stderr, "%s used after being freed", expected_magic_header);
        // this is not normal error handling, this is programmer error that should crash the program.
        // program cannot safely continue if memory has been used after it's been freed.
        // abort() is nasty, but security vulnerability may be worse.
        abort();
    }
    // Magic tags are compared by address: each struct type points at its own
    // unique static string, so pointer equality identifies the type.
    return header == expected_magic_header;
}
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer)
{
    if (NULL == pointer) {
        return false;
    }
    // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not.
    // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read.
    const volatile char *probe = pointer;
    char test_access = *probe;
    return test_access || true;
}
// Logs an error-prefixed message through the attr's log callback.
LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        liq_verbose_printf(attr, "  error: %s", msg);
    }
}
// Converts a 0..100 quality setting to the internal MSE threshold.
// 0 means "accept anything", 100 means "perfect only".
static double quality_to_mse(long quality)
{
    switch (quality) {
        case 0:   return MAX_DIFF;
        case 100: return 0;
    }
    // curve fudged to be roughly similar to quality of libjpeg
    // except lowest 10 for really low number of colors
    const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
    return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}
// Inverse of quality_to_mse(): the highest quality whose threshold admits mse.
static unsigned int mse_to_quality(double mse)
{
    int quality = 100;
    while (quality > 0) {
        if (mse <= quality_to_mse(quality) + 0.000001) { // + epsilon for floating point errors
            return quality;
        }
        quality--;
    }
    return 0;
}
/** internally MSE is a sum of all channels with pixels 0..1 range,
 but other software gives per-RGB-channel MSE for 0..255 range */
static double mse_to_standard_mse(double mse) {
    // scale: (255^2 ≈ 65536 per channel) / 6 channels-worth of internal sum
    return mse * 65536.0/6.0;
}
// Sets minimum and target quality (0..100, minimum <= target); both are
// stored internally as MSE thresholds.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool range_ok = minimum >= 0 && target >= minimum && target <= 100;
    if (!range_ok) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    attr->target_mse = quality_to_mse(target);
    attr->max_mse = quality_to_mse(minimum);
    return LIQ_OK;
}
// Returns the minimum acceptable quality (0..100), or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return mse_to_quality(attr->max_mse);
    }
    return -1;
}
// Returns the target quality (0..100), or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return mse_to_quality(attr->target_mse);
    }
    return -1;
}
// Caps the palette size; valid range is 2..256.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = colors >= 2 && colors <= 256;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    attr->max_colors = colors;
    return LIQ_OK;
}
// Returns the configured palette size limit, or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return attr->max_colors;
    }
    return -1;
}
// Sets minimum output posterization in bits (0..4).
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = bits >= 0 && bits <= 4;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    attr->min_posterization_output = bits;
    return LIQ_OK;
}
// Returns the user-requested minimum posterization, or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return attr->min_posterization_output;
    }
    return -1;
}
// Sets the speed/quality trade-off (1=slowest/best .. 10=fastest/worst) and
// derives all the internal tuning parameters from it. The exact constants
// are empirical; do not "simplify" them.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE;
    // number of k-means refinement passes grows quadratically as speed drops
    unsigned int iterations = MAX(8-speed, 0);
    iterations += iterations * iterations/2;
    attr->kmeans_iterations = iterations;
    // convergence threshold: slower speeds demand smaller error deltas
    attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed));
    attr->feedback_loop_trials = MAX(56-9*speed, 0);
    attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
    // at speed >= 8, drop one bit of input precision to shrink the histogram
    attr->min_posterization_input = (speed >= 8) ? 1 : 0;
    attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping
    if (attr->use_dither_map && speed < 3) {
        attr->use_dither_map = 2; // always
    }
    attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
    attr->speed = speed;
    // split the 100% progress budget between analysis, quantization and refinement
    attr->progress_stage1 = attr->use_contrast_maps ? 20 : 8;
    if (attr->feedback_loop_trials < 2) {
        attr->progress_stage1 += 30;
    }
    attr->progress_stage3 = 50 / (1+speed);
    attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3;
    return LIQ_OK;
}
// Returns the configured speed setting, or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return attr->speed;
    }
    return -1;
}
// Overrides the gamma used when producing the output palette/pixels.
// Invalidates any cached remapping result, since it was built for the old gamma.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    const bool in_range = gamma > 0 && gamma < 1.0;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }
    res->gamma = gamma;
    return LIQ_OK;
}
// Sets the minimum alpha (0..255) kept opaque — IE6 transparency workaround.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    const bool in_range = min >= 0 && min <= 255;
    if (!in_range) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    attr->min_opaque_val = (double)min/255.0;
    return LIQ_OK;
}
// Returns the minimum-opacity setting scaled back to 0..255, or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return MIN(255.f, 256.f * attr->min_opaque_val);
    }
    return -1;
}
// When enabled, puts the transparent color at the last palette index
// (workaround for Blu-ray subtitle format).
LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        attr->last_index_transparent = (is_last != 0);
    }
}
// Registers (or clears) the progress callback used during quantization.
LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        attr->progress_callback = callback;
        attr->progress_callback_user_info = user_info;
    }
}
// Registers (or clears) the progress callback used during remapping.
LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info)
{
    if (CHECK_STRUCT_TYPE(result, liq_result)) {
        result->progress_callback = callback;
        result->progress_callback_user_info = user_info;
    }
}
// Installs a log callback; flushes any pending output for the old one first.
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        liq_verbose_printf_flush(attr);
        attr->log_callback = callback;
        attr->log_callback_user_info = user_info;
    }
}
// Installs the callback invoked when log output should be flushed.
LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
    if (CHECK_STRUCT_TYPE(attr, liq_attr)) {
        attr->log_flush_callback = callback;
        attr->log_flush_callback_user_info = user_info;
    }
}
// Convenience constructor: creates an attr with the default allocators.
LIQ_EXPORT liq_attr* liq_attr_create()
{
    liq_attr *const attr = liq_attr_create_with_allocator(NULL, NULL);
    return attr;
}
// Destroys an attr handle. Marks the struct as freed first so a later
// use-after-free is caught by the magic-header check.
LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return;
    }
    liq_verbose_printf_flush(attr);
    attr->magic_header = liq_freed_magic;
    void (*free_fn)(void*) = attr->free;
    free_fn(attr);
}
// Clones an attr (shallow copy is sufficient: all members are values or
// caller-owned callback pointers). Returns NULL on bad handle or OOM.
LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig)
{
    if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
        return NULL;
    }
    liq_attr *const copy = orig->malloc(sizeof(liq_attr));
    if (copy) {
        *copy = *orig;
    }
    return copy;
}
// Default allocator: returns a 16-byte-aligned block. Over-allocates by 16
// bytes and stores the shift amount (obfuscated with ^0x59) in the byte just
// before the returned pointer so liq_aligned_free() can undo it.
static void *liq_aligned_malloc(size_t size)
{
    unsigned char *ptr = malloc(size + 16);
    if (!ptr) {
        return NULL;
    }
    uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
    ptr += offset;
    assert(0 == (((uintptr_t)ptr) & 15));
    ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()
    return ptr;
}
// Counterpart of liq_aligned_malloc(): recovers the stored shift from
// ptr[-1] and frees the original, unaligned allocation.
LIQ_NONNULL static void liq_aligned_free(void *inptr)
{
    unsigned char *ptr = inptr;
    size_t offset = ptr[-1] ^ 0x59; // undo obfuscation; valid offsets are 1..16
    assert(offset > 0 && offset <= 16);
    free(ptr - offset);
}
// Creates an attr using a caller-supplied malloc/free pair, or the default
// aligned allocators when both are NULL. Returns NULL when only one of the
// pair is given, on OOM, or (SSE builds) when the CPU lacks SSE.
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
    if (!is_sse_available()) {
        return NULL;
    }
#endif
    const bool has_malloc = (custom_malloc != NULL);
    const bool has_free = (custom_free != NULL);
    if (!has_malloc && !has_free) {
        custom_malloc = liq_aligned_malloc;
        custom_free = liq_aligned_free;
    } else if (has_malloc != has_free) {
        return NULL; // either specify both or none
    }
    liq_attr *const attr = custom_malloc(sizeof(liq_attr));
    if (!attr) {
        return NULL;
    }
    *attr = (liq_attr) {
        .magic_header = liq_attr_magic,
        .malloc = custom_malloc,
        .free = custom_free,
        .max_colors = 256,
        .min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha)
        .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles.
        .target_mse = 0,
        .max_mse = MAX_DIFF,
    };
    liq_set_speed(attr, 4); // derives the remaining tuning parameters
    return attr;
}
// Forces a color into the final palette, converting it to the image's
// linear-float representation. At most 256 fixed colors are supported.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, img->gamma);
    const rgba_pixel rgba = {
        .r = color.r,
        .g = color.g,
        .b = color.b,
        .a = color.a,
    };
    img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, rgba);
    return LIQ_OK;
}
// Appends an already-converted float color to the histogram's fixed set.
LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color)
{
    if (hist->fixed_colors_count > 255) {
        return LIQ_UNSUPPORTED; // fixed palette is full
    }
    const unsigned short idx = hist->fixed_colors_count++;
    hist->fixed_colors[idx] = color;
    return LIQ_OK;
}
// Public wrapper: converts an 8-bit RGBA color using the given gamma
// (0 selects the default 1/2.2 ≈ 0.45455) and stores it as fixed.
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma)
{
    if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER;
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455);
    const rgba_pixel rgba = {
        .r = color.r,
        .g = color.g,
        .b = color.b,
        .a = color.a,
    };
    return liq_histogram_add_fixed_color_f(hist, rgba_to_f(gamma_lut, rgba));
}
// Switches the image to low-memory mode: allocates per-thread float rows
// instead of a whole-image float cache. Returns false on OOM.
LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img)
{
    const size_t row_bytes = sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width);
    img->temp_f_row = img->malloc(row_bytes * omp_get_max_threads());
    return img->temp_f_row != NULL;
}
// Decides whether caching the whole image as floats would exceed the memory
// budget (LIQ_HIGH_MEMORY_LIMIT, divided by 8 when the caller hints at
// additional memory pressure).
LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
    return (size_t)img->width * (size_t)img->height > (low_memory_hint ? LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow
}
// Builds a liq_image backed either by caller-owned row pointers or by a
// row callback. Returns NULL on invalid arguments or allocation failure;
// unlike the previous version, partial allocations are released on failure
// instead of being leaked.
static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
    if (gamma < 0 || gamma > 1.0) {
        liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
        return NULL;
    }
    if (!rows && !row_callback) {
        liq_log_error(attr, "missing row data");
        return NULL;
    }
    liq_image *img = attr->malloc(sizeof(liq_image));
    if (!img) return NULL;
    *img = (liq_image){
        .magic_header = liq_image_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        .width = width, .height = height,
        .gamma = gamma ? gamma : 0.45455,
        .rows = rows,
        .row_callback = row_callback,
        .row_callback_user_info = row_callback_user_info,
        .min_opaque_val = attr->min_opaque_val,
    };
    // temp_row is needed whenever rows can't be used directly
    // (callback source, or alpha needs to be modified per-row)
    if (!rows || attr->min_opaque_val < 1.f) {
        img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads());
        if (!img->temp_row) {
            attr->free(img); // fix: previously leaked img on this path
            return NULL;
        }
    }
    // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels
    if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
        verbose_print(attr, " conserving memory");
        if (!liq_image_use_low_memory(img)) {
            if (img->temp_row) {
                attr->free(img->temp_row); // fix: previously leaked on this path
            }
            attr->free(img);
            return NULL;
        }
    }
    if (img->min_opaque_val < 1.f) {
        verbose_print(attr, " Working around IE6 bug by making image less transparent...");
    }
    return img;
}
// Transfers ownership of the rows array and/or the pixel bitmap to the
// library, so liq_image_destroy() frees them. Only valid for images created
// from row pointers (img->rows != NULL).
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }
    if (ownership_flags & LIQ_OWN_ROWS) {
        // rows allocated internally (liq_image_create_rgba) are already owned
        if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
        img->free_rows = true;
    }
    if (ownership_flags & LIQ_OWN_PIXELS) {
        img->free_pixels = true;
        if (!img->pixels) {
            // for simplicity of this API there's no explicit bitmap argument,
            // so the row with the lowest address is assumed to be at the start of the bitmap
            img->pixels = img->rows[0];
            for(unsigned int i=1; i < img->height; i++) {
                img->pixels = MIN(img->pixels, img->rows[i]);
            }
        }
    }
    return LIQ_OK;
}
LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image);
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image);
// Attaches a per-pixel importance map (width*height bytes). With
// LIQ_COPY_PIXELS the buffer is duplicated; with LIQ_OWN_PIXELS the library
// takes ownership of the caller's buffer.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership) {
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER;
    const size_t required_size = (size_t)img->width * (size_t)img->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }
    switch (ownership) {
        case LIQ_COPY_PIXELS: {
            unsigned char *copy = img->malloc(required_size);
            if (!copy) {
                return LIQ_OUT_OF_MEMORY;
            }
            memcpy(copy, importance_map, required_size);
            importance_map = copy;
            break;
        }
        case LIQ_OWN_PIXELS:
            break; // use the caller's buffer directly
        default:
            return LIQ_UNSUPPORTED;
    }
    liq_image_free_importance_map(img); // drop any previously attached map
    img->importance_map = importance_map;
    return LIQ_OK;
}
// Attaches a background image (same dimensions, no nested background) and
// takes ownership of it; replaces and destroys any previous background.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER;
    if (background->background) {
        return LIQ_UNSUPPORTED; // backgrounds must not chain
    }
    const bool same_size = img->width == background->width && img->height == background->height;
    if (!same_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }
    if (img->background) {
        liq_image_destroy(img->background);
    }
    img->background = background;
    liq_image_free_maps(img); // Force them to be re-analyzed with the background
    return LIQ_OK;
}
// Validates the attr handle and image dimensions, rejecting sizes that
// could overflow later buffer-size arithmetic.
LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return false;
    }
    const bool positive = width > 0 && height > 0;
    if (!positive) {
        liq_log_error(attr, "width and height must be > 0");
        return false;
    }
    const bool too_large = width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t);
    if (too_large) {
        liq_log_error(attr, "image too large");
        return false;
    }
    return true;
}
// Creates an image whose pixels are produced on demand by a row callback.
LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
    if (check_image_size(attr, width, height)) {
        return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma);
    }
    return NULL;
}
// Creates an image from an array of caller-owned row pointers, probing each
// pointer up front so bad input crashes here rather than mid-quantization.
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    for(int row = 0; row < height; row++) {
        const bool row_ok = CHECK_USER_POINTER(rows+row) && CHECK_USER_POINTER(rows[row]);
        if (!row_ok) {
            liq_log_error(attr, "invalid row pointers");
            return NULL;
        }
    }
    return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}
// Creates an image from a single contiguous RGBA bitmap by synthesizing an
// internally-owned row-pointer array over it.
LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    if (!CHECK_USER_POINTER(bitmap)) {
        liq_log_error(attr, "invalid bitmap pointer");
        return NULL;
    }
    rgba_pixel *const pixels = (rgba_pixel *const)bitmap;
    rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
    if (!rows) {
        return NULL;
    }
    for(int row = 0; row < height; row++) {
        rows[row] = pixels + (size_t)width * row;
    }
    liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
    if (!image) {
        attr->free(rows); // creation failed; release the synthesized rows
        return NULL;
    }
    // the rows array is ours; destroy() must free it
    image->free_rows = true;
    image->free_rows_internal = true;
    return image;
}
// Dedicated non-inlined frame so crashes inside user callbacks are clearly
// attributed in backtraces.
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info)
{
    assert(callback != NULL);
    assert(temp_row != NULL);
    callback(temp_row, row, width, user_info);
}
LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) {
return false;
}
return img->rows || (img->temp_row && img->row_callback);
}
LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img)
{
assert(liq_image_has_rgba_pixels(img));
const bool iebug = img->min_opaque_val < 1.f;
return (img->rows && !iebug);
}
// Returns one RGBA row, either directly from the caller's rows or staged
// into this thread's slice of temp_row (filled from rows or the callback),
// applying the IE6 alpha workaround when enabled.
LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
    if (liq_image_can_use_rgba_rows(img)) {
        return img->rows[row]; // fast path: read caller memory in place
    }
    assert(img->temp_row);
    // each OpenMP thread gets its own padded slice of temp_row
    rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
    if (img->rows) {
        memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
    } else {
        liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
    }
    if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row);
    return temp_row;
}
// Converts one RGBA row into linear-float pixels using the gamma LUT.
// The destination must be 16-byte aligned for SSE builds.
LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
    assert(row_f_pixels);
    assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15));
    const rgba_pixel *const src = liq_image_get_row_rgba(img, row);
    const unsigned int width = img->width;
    for(unsigned int col = 0; col < width; col++) {
        row_f_pixels[col] = rgba_to_f(gamma_lut, src[col]);
    }
}
// One-time setup for float-pixel access: either converts and caches the
// whole image into f_pixels, or falls back to per-row low-memory conversion.
// Must be called from the main thread before parallel row access.
LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img)
{
    assert(omp_get_thread_num() == 0);
    if (img->f_pixels) {
        return true; // already initialized
    }
    if (!liq_image_should_use_low_memory(img, false)) {
        img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
    }
    if (!img->f_pixels) {
        // cache skipped or allocation failed: use per-thread temp rows instead
        return liq_image_use_low_memory(img);
    }
    if (!liq_image_has_rgba_pixels(img)) {
        return false;
    }
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, img->gamma);
    for(unsigned int i=0; i < img->height; i++) {
        convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut);
    }
    return true;
}
// Returns one row of linear-float pixels: from the whole-image cache when
// present, otherwise converted on the fly into this thread's temp_f_row
// slice (low-memory mode).
LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row)
{
    if (!img->f_pixels) {
        assert(img->temp_f_row); // init should have done that
        float gamma_lut[256];
        to_f_set_gamma(gamma_lut, img->gamma);
        f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
        convert_row_to_f(img, row_for_thread, row, gamma_lut);
        return row_for_thread;
    }
    return img->f_pixels + img->width * row;
}
// Returns the image width in pixels, or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image)
{
    if (CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return input_image->width;
    }
    return -1;
}
// Returns the image height in pixels, or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image)
{
    if (CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return input_image->height;
    }
    return -1;
}
typedef void free_func(void*);
// Picks the deallocator for user-supplied buffers (rows/pixels): internal
// allocations and custom allocators use img->free, but buffers the user
// allocated with plain malloc() must go to plain free(), not liq_aligned_free.
LIQ_NONNULL static free_func *get_default_free_func(liq_image *img)
{
    // When default allocator is used then user-supplied pointers must be freed with free()
    if (img->free_rows_internal || img->free != liq_aligned_free) {
        return img->free;
    }
    return free;
}
// Releases the pixel bitmap and/or rows array if the library owns them.
LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image)
{
    if (input_image->free_pixels && input_image->pixels) {
        free_func *const release = get_default_free_func(input_image);
        release(input_image->pixels);
        input_image->pixels = NULL;
    }
    if (input_image->free_rows && input_image->rows) {
        free_func *const release = get_default_free_func(input_image);
        release(input_image->rows);
        input_image->rows = NULL;
    }
}
// Frees and clears the importance map, if any.
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image) {
    unsigned char *const map = input_image->importance_map;
    if (map) {
        input_image->free(map);
        input_image->importance_map = NULL;
    }
}
// Frees all derived analysis maps (importance, edges, dither).
LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image) {
    liq_image_free_importance_map(input_image);
    unsigned char *const edges = input_image->edges;
    if (edges) {
        input_image->free(edges);
        input_image->edges = NULL;
    }
    unsigned char *const dither = input_image->dither_map;
    if (dither) {
        input_image->free(dither);
        input_image->dither_map = NULL;
    }
}
// Destroys the image and everything it owns (source buffers if ownership
// was transferred, analysis maps, float caches, temp rows, background),
// then poisons the magic header to catch use-after-free.
LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;
    liq_image_free_rgba_source(input_image);
    liq_image_free_maps(input_image);
    f_pixel *const cache = input_image->f_pixels;
    if (cache) {
        input_image->free(cache);
    }
    rgba_pixel *const staging = input_image->temp_row;
    if (staging) {
        input_image->free(staging);
    }
    f_pixel *const f_staging = input_image->temp_f_row;
    if (f_staging) {
        input_image->free(f_staging);
    }
    if (input_image->background) {
        liq_image_destroy(input_image->background);
    }
    input_image->magic_header = liq_freed_magic;
    input_image->free(input_image);
}
// Creates an empty histogram using the attr's allocators; posterization is
// the stricter of the user setting and the speed-derived setting.
LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return NULL;
    }
    liq_histogram *const hist = attr->malloc(sizeof(liq_histogram));
    if (!hist) {
        return NULL;
    }
    *hist = (liq_histogram) {
        .magic_header = liq_histogram_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        .ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input),
    };
    return hist;
}
// Destroys a histogram, its color hash, and poisons the magic header.
LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist)
{
    if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) {
        return;
    }
    hist->magic_header = liq_freed_magic;
    pam_freeacolorhash(hist->acht);
    hist->free(hist);
}
// Legacy convenience wrapper around liq_image_quantize(); returns NULL on
// any error (the error code is discarded).
LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
    liq_result *res = NULL;
    const liq_error err = liq_image_quantize(img, attr, &res);
    return (err == LIQ_OK) ? res : NULL;
}
// Quantizes a single image: builds a temporary histogram from it and runs
// the quantizer. The temporary histogram is always destroyed before
// returning (previously it was leaked when liq_histogram_add_image failed).
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (!liq_image_has_rgba_pixels(img)) {
        return LIQ_UNSUPPORTED;
    }
    liq_histogram *hist = liq_histogram_create(attr);
    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }
    liq_error err = liq_histogram_add_image(hist, attr, img);
    if (LIQ_OK == err) {
        err = liq_histogram_quantize_internal(hist, attr, false, result_output);
    }
    liq_histogram_destroy(hist); // fix: destroy on the error path too
    return err;
}
// Public histogram quantization entry point; fixed colors become part of
// the result palette.
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) {
    const bool fixed_result_colors = true;
    return liq_histogram_quantize_internal(input_hist, attr, fixed_result_colors, result_output);
}
// Shared quantization path: finalizes the accumulated histogram, runs the
// median-cut quantizer, and always frees the finalized histogram.
// *result_output is set to NULL up front so it is defined on every error path.
LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output)
{
    if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER;
    *result_output = NULL;
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (liq_progress(attr, 0)) return LIQ_ABORTED; // give the user a chance to abort immediately
    histogram *hist;
    liq_error err = finalize_histogram(input_hist, attr, &hist);
    if (err != LIQ_OK) {
        return err;
    }
    err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output);
    pam_freeacolorhist(hist);
    return err;
}
// Sets the Floyd–Steinberg dithering strength (0..1) for future remaps.
// The range is now validated *before* the cached remapping result is
// destroyed — previously an out-of-range value returned an error but had
// already destroyed the cached result as a side effect.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE;
    // the cached remapping result was produced with the old level; discard it
    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }
    res->dither_level = dither_level;
    return LIQ_OK;
}
// Clones the result's palette and remap settings into a fresh
// liq_remapping_result. Returns NULL on bad handle or OOM — including when
// pam_duplicate_colormap() fails, which was previously left unchecked and
// would crash later on a NULL palette.
LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return NULL;
    }
    liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
    if (!res) return NULL;
    *res = (liq_remapping_result) {
        .magic_header = liq_remapping_result_magic,
        .malloc = result->malloc,
        .free = result->free,
        .dither_level = result->dither_level,
        .use_dither_map = result->use_dither_map,
        .palette_error = result->palette_error,
        .gamma = result->gamma,
        .palette = pam_duplicate_colormap(result->palette),
        .progress_callback = result->progress_callback,
        .progress_callback_user_info = result->progress_callback_user_info,
        .progress_stage1 = result->use_dither_map ? 20 : 0,
    };
    if (!res->palette) {
        // colormap duplication failed; release the half-built result
        res->magic_header = liq_freed_magic;
        res->free(res);
        return NULL;
    }
    return res;
}
// Returns the output gamma, or -1 for a bad handle.
LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
    if (CHECK_STRUCT_TYPE(result, liq_result)) {
        return result->gamma;
    }
    return -1;
}
// Destroys a remapping result (palette copy + pixel buffer) and poisons
// its magic header.
LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) {
        return;
    }
    if (result->palette) {
        pam_freecolormap(result->palette);
    }
    if (result->pixels) {
        result->free(result->pixels);
    }
    result->magic_header = liq_freed_magic;
    result->free(result);
}
// Destroys a quantization result. The exposed int_palette structs are
// zeroed first so stale pointers handed to the user become obviously dead.
LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) {
        return;
    }
    memset(&res->int_palette, 0, sizeof(liq_palette));
    liq_remapping_result *const remap = res->remapping;
    if (remap) {
        memset(&remap->int_palette, 0, sizeof(liq_palette));
        liq_remapping_result_destroy(remap);
    }
    pam_freecolormap(res->palette);
    res->magic_header = liq_freed_magic;
    res->free(res);
}
// Returns the palette MSE in per-channel 0..255 units, or -1 if unknown.
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result) {
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    const double err = result->palette_error;
    return (err >= 0) ? mse_to_standard_mse(err) : -1;
}
// Returns the last remap's MSE in per-channel 0..255 units, or -1 if unknown.
LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result) {
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (!result->remapping) {
        return -1;
    }
    const double err = result->remapping->palette_error;
    return (err >= 0) ? mse_to_standard_mse(err) : -1;
}
// Returns the palette quality on the 0..100 scale, or -1 if unknown.
LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result) {
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    const double err = result->palette_error;
    return (err >= 0) ? (int)mse_to_quality(err) : -1;
}
// Returns the last remap's quality on the 0..100 scale, or -1 if unknown.
LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result) {
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
    if (!result->remapping) {
        return -1;
    }
    const double err = result->remapping->palette_error;
    return (err >= 0) ? (int)mse_to_quality(err) : -1;
}
// qsort comparator: sorts palette entries by descending popularity.
// Returns 0 for equal keys — the original returned ±1 in that case, making
// the comparator asymmetric (cmp(a,b) and cmp(b,a) both 1), which violates
// qsort's consistency requirement (C standard 7.22.5).
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    if (v1 > v2) return -1;
    if (v1 < v2) return 1;
    return 0;
}
/* Sorts `nelem` palette entries beginning at index `start` by descending
   popularity. A zero-length range is a no-op. */
LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
    if (nelem) {
        qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
    }
}
/* Swaps two palette entries in place. Plain braced block (not do/while(0));
   callers below use it as a standalone statement only. */
#define SWAP_PALETTE(map, a,b) { \
    const colormap_item tmp = (map)->palette[(a)]; \
    (map)->palette[(a)] = (map)->palette[(b)]; \
    (map)->palette[(b)] = tmp; }

/* Reorders the palette for PNG friendliness:
   - normally: transparent entries first (shrinks the tRNS chunk), each group
     sorted by descending popularity (helps compression);
   - with last_index_transparent: one fully transparent entry is forced to the
     last index instead (some consumers require that layout).
   Fixed (user-supplied) colors at the tail are never reordered. */
LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
    /*
    ** Step 3.5 [GRR]: remap the palette colors so that all entries with
    ** the maximal alpha value (i.e., fully opaque) are at the end and can
    ** therefore be omitted from the tRNS chunk.
    */
    if (options->last_index_transparent) {
        for(unsigned int i=0; i < map->colors; i++) {
            // first (near-)fully-transparent entry found is moved to the end
            if (map->palette[i].acolor.a < 1.f/256.f) {
                const unsigned int old = i, transparent_dest = map->colors-1;

                SWAP_PALETTE(map, transparent_dest, old);

                /* colors sorted by popularity make pngs slightly more compressible */
                sort_palette_qsort(map, 0, map->colors-1);
                return; // at most one entry is relocated; done
            }
        }
    }

    // Fixed colors were appended after the generated ones; count the
    // leading non-fixed entries — only those may be reordered.
    unsigned int non_fixed_colors = 0;
    for(unsigned int i = 0; i < map->colors; i++) {
        if (map->palette[i].fixed) {
            break;
        }
        non_fixed_colors++;
    }

    /* move transparent colors to the beginning to shrink trns chunk */
    unsigned int num_transparent = 0;
    for(unsigned int i = 0; i < non_fixed_colors; i++) {
        if (map->palette[i].acolor.a < 255.f/256.f) {
            // current transparent color is swapped with earlier opaque one
            if (i != num_transparent) {
                SWAP_PALETTE(map, num_transparent, i);
                i--; // re-examine index i: it now holds the swapped-in opaque entry
            }
            num_transparent++;
        }
    }

    liq_verbose_printf(options, "  eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");

    /* colors sorted by popularity make pngs slightly more compressible
     * opaque and transparent are sorted separately
     */
    sort_palette_qsort(map, 0, num_transparent);
    sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent);

    // empirical shuffle of a few low indices; see inline note
    if (non_fixed_colors > 9 && map->colors > 16) {
        SWAP_PALETTE(map, 7, 1); // slightly improves compression
        SWAP_PALETTE(map, 8, 2);
        SWAP_PALETTE(map, 9, 3);
    }
}
/* Reduces an 8-bit channel value to 8-`bits` significant bits, replicating the
   kept high bits into the cleared low bits so the full 0..255 range is still
   reachable (0 stays 0, 255 stays 255). `bits` == 0 returns the value unchanged. */
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
    const unsigned int low_mask = (1u << bits) - 1u;
    const unsigned int kept_high_bits = color & ~low_mask;
    return kept_high_bits | (color >> (8u - bits));
}
/* Converts the internal float palette in `map` to 8-bit RGBA entries in `dest`,
   applying gamma and optional posterization. The quantized colors are also
   written back into `map` so later remapping/dithering measures error against
   what will actually be emitted, not the pre-rounding floats. */
LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma);

    dest->count = map->colors;
    for(unsigned int x = 0; x < map->colors; ++x) {
        rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor);

        px.r = posterize_channel(px.r, posterize);
        px.g = posterize_channel(px.g, posterize);
        px.b = posterize_channel(px.b, posterize);
        px.a = posterize_channel(px.a, posterize);

        map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */

        if (!px.a && !map->palette[x].fixed) {
            // fully transparent generated entries get a fixed arbitrary RGB;
            // presumably to keep output deterministic/compressible — TODO confirm
            px.r = 71; px.g = 112; px.b = 76;
        }

        dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
    }
}
/* Returns the final 8-bit palette. If a remap already produced its own
   (possibly image-refined) integer palette, that one wins; otherwise the
   integer palette is built lazily from the float palette on first call.
   NOTE(review): the lazy initialization is not obviously thread-safe —
   confirm callers serialize access to a liq_result. */
LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;

    if (result->remapping && result->remapping->int_palette.count) {
        return &result->remapping->int_palette;
    }

    if (!result->int_palette.count) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
    }
    return &result->int_palette;
}
/* Remaps input_image to the palette in `map`, writing one palette index per
   pixel into output_pixels. While remapping it also accumulates per-entry
   color averages (K-Means step) so the caller can refine the palette.
   Returns the mean squared error per pixel, or -1 if float-row conversion
   could not be initialized. Parallelized per row with OpenMP when available. */
LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map)
{
    const int rows = input_image->height;
    const unsigned int cols = input_image->width;
    double remapping_error=0;

    if (!liq_image_get_row_f_init(input_image)) {
        return -1;
    }
    if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
        return -1;
    }

    const colormap_item *acolormap = map->palette;

    struct nearest_map *const n = nearest_init(map);

    // palette entry closest to fully-transparent; only used when a background image is set
    const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;

    const unsigned int max_threads = omp_get_max_threads();
    // one accumulator block per thread, padded by KMEANS_CACHE_LINE_GAP to avoid false sharing
    LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads);
    kmeans_init(map, max_threads, average_color);

#if __GNUC__ >= 9
    #pragma omp parallel for if (rows*cols > 3000) \
        schedule(static) default(none) shared(acolormap,average_color,cols,input_image,map,n,output_pixels,rows,transparent_index) reduction(+:remapping_error)
#else
    #pragma omp parallel for if (rows*cols > 3000) \
        schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error)
#endif
    for(int row = 0; row < rows; ++row) {
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        // consult the background only if the transparent entry is actually (near) transparent
        const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;

        unsigned int last_match=0;
        for(unsigned int col = 0; col < cols; ++col) {
            float diff;
            // last_match warm-starts the search; neighboring pixels usually map to the same entry
            last_match = nearest_search(n, &row_pixels[col], last_match, &diff);
            // if the background approximates this pixel at least as well as the palette,
            // emit transparency and let the background show through
            if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) {
                last_match = transparent_index;
            }

            output_pixels[row][col] = last_match;

            remapping_error += diff;
            kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color);
        }
    }

    kmeans_finalize(map, max_threads, average_color);
    nearest_free(n);

    return remapping_error / (input_image->width * input_image->height);
}
/* Applies accumulated Floyd-Steinberg error (`thiserr`) to pixel `px`, scaled
   by dither_level. The error is attenuated (via `ratio`) so no RGB channel
   leaves the slightly-widened [-0.1, 1.1] range, alpha is hard-clamped to
   [0, 1], and tiny errors are dropped entirely to keep files smaller. */
inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
    /* Use Floyd-Steinberg errors to adjust actual color. */
    const float sr = thiserr.r * dither_level,
                sg = thiserr.g * dither_level,
                sb = thiserr.b * dither_level,
                sa = thiserr.a * dither_level;

    float ratio = 1.0;
    const float max_overflow = 1.1f;
    const float max_underflow = -0.1f;

    // allowing some overflow prevents undithered bands caused by clamping of all channels
    // (each branch can only trigger when the corresponding s* is nonzero, so the
    // divisions below are safe)
    if (px.r + sr > max_overflow)  ratio = MIN(ratio, (max_overflow -px.r)/sr);
    else { if (px.r + sr < max_underflow)  ratio = MIN(ratio, (max_underflow-px.r)/sr); }
    if (px.g + sg > max_overflow)  ratio = MIN(ratio, (max_overflow -px.g)/sg);
    else { if (px.g + sg < max_underflow)  ratio = MIN(ratio, (max_underflow-px.g)/sg); }
    if (px.b + sb > max_overflow)  ratio = MIN(ratio, (max_overflow -px.b)/sb);
    else { if (px.b + sb < max_underflow)  ratio = MIN(ratio, (max_underflow-px.b)/sb); }

    // alpha is clamped hard: transparency overflow would change coverage
    float a = px.a + sa;
    if (a > 1.f) { a = 1.f; }
    else if (a < 0) { a = 0; }

    // If dithering error is crazy high, don't propagate it that much
    // This prevents crazy green pixels popping out of the blue (or red or black! ;)
    const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
    if (dither_error > max_dither_error) {
        ratio *= 0.8f;
    } else if (dither_error < 2.f/256.f/256.f) {
        // don't dither areas that don't have noticeable error — makes file smaller
        return px;
    }

    return (f_pixel) {
        .r=px.r + sr * ratio,
        .g=px.g + sg * ratio,
        .b=px.b + sb * ratio,
        .a=a,
    };
}
/**
Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.
If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image.
*/
/* Floyd-Steinberg remap of input_image into output_pixels using the palette in
   `quant`. Scans rows in serpentine (zig-zag) order, spreading quantization
   error with the classic 7/16, 5/16, 3/16, 1/16 weights. When a dither/edge
   map exists, dithering strength is modulated per pixel so flat areas dither
   and edges stay crisp. Returns false on allocation failure or user abort. */
LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped)
{
    const int rows = input_image->height, cols = input_image->width;
    // prefer the refined dither map; fall back to the raw edge map
    const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;

    const colormap *map = quant->palette;
    const colormap_item *acolormap = map->palette;

    if (!liq_image_get_row_f_init(input_image)) {
        return false;
    }
    if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
        return false;
    }

    /* Initialize Floyd-Steinberg error vectors. */
    const size_t errwidth = cols+2;
    // one allocation holds both rows; +2 saves from checking out of bounds access
    f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2);
    if (!thiserr) return false;
    f_pixel *restrict nexterr = thiserr + errwidth;
    memset(thiserr, 0, errwidth * sizeof(thiserr[0]));
    bool ok = true;
    struct nearest_map *const n = nearest_init(map);
    // palette entry closest to fully-transparent; used only with a background image
    const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;

    // response to this value is non-linear and without it any value < 0.8 would give almost no dithering
    float base_dithering_level = quant->dither_level;
    base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level);

    if (dither_map) {
        base_dithering_level *= 1.f/255.f; // convert byte to float
    }
    base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating

    int fs_direction = 1; // +1 = left-to-right, -1 = right-to-left (serpentine)
    unsigned int last_match=0;
    for (int row = 0; row < rows; ++row) {
        if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) {
            ok = false; // user callback requested abort
            break;
        }

        memset(nexterr, 0, errwidth * sizeof(nexterr[0]));

        int col = (fs_direction > 0) ? 0 : (cols - 1);
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        // background consulted only when the transparent palette entry is truly transparent
        const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;

        do {
            float dither_level = base_dithering_level;
            if (dither_map) {
                dither_level *= dither_map[row*cols + col]; // 0 = edge (no dither), 255 = flat
            }

            const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);

            // warm-start the nearest search from the previous remap result if available
            const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
            float diff;
            last_match = nearest_search(n, &spx, guessed_match, &diff);
            f_pixel output_px = acolormap[last_match].acolor;
            // emit transparency when the background matches at least as well as the palette
            if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) {
                output_px = bg_pixels[col];
                output_pixels[row][col] = transparent_index;
            } else {
                output_pixels[row][col] = last_match;
            }

            f_pixel err = {
                .r = (spx.r - output_px.r),
                .g = (spx.g - output_px.g),
                .b = (spx.b - output_px.b),
                .a = (spx.a - output_px.a),
            };

            // If dithering error is crazy high, don't propagate it that much
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
            if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
                err.r *= 0.75f;
                err.g *= 0.75f;
                err.b *= 0.75f;
                err.a *= 0.75f;
            }

            /* Propagate Floyd-Steinberg error terms. */
            // Error-vector indices are offset by +1, so col+0/col+2 are the
            // previous/next pixel in scan direction without bounds checks.
            if (fs_direction > 0) {
                thiserr[col + 2].a += err.a * (7.f/16.f);
                thiserr[col + 2].r += err.r * (7.f/16.f);
                thiserr[col + 2].g += err.g * (7.f/16.f);
                thiserr[col + 2].b += err.b * (7.f/16.f);

                nexterr[col + 2].a  = err.a * (1.f/16.f); // assignment (not +=): first write to this cell
                nexterr[col + 2].r  = err.r * (1.f/16.f);
                nexterr[col + 2].g  = err.g * (1.f/16.f);
                nexterr[col + 2].b  = err.b * (1.f/16.f);

                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);

                nexterr[col    ].a += err.a * (3.f/16.f);
                nexterr[col    ].r += err.r * (3.f/16.f);
                nexterr[col    ].g += err.g * (3.f/16.f);
                nexterr[col    ].b += err.b * (3.f/16.f);
            } else {
                thiserr[col    ].a += err.a * (7.f/16.f);
                thiserr[col    ].r += err.r * (7.f/16.f);
                thiserr[col    ].g += err.g * (7.f/16.f);
                thiserr[col    ].b += err.b * (7.f/16.f);

                nexterr[col    ].a  = err.a * (1.f/16.f);
                nexterr[col    ].r  = err.r * (1.f/16.f);
                nexterr[col    ].g  = err.g * (1.f/16.f);
                nexterr[col    ].b  = err.b * (1.f/16.f);

                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);

                nexterr[col + 2].a += err.a * (3.f/16.f);
                nexterr[col + 2].r += err.r * (3.f/16.f);
                nexterr[col + 2].g += err.g * (3.f/16.f);
                nexterr[col + 2].b += err.b * (3.f/16.f);
            }

            // remapping is done in zig-zag
            col += fs_direction;
            if (fs_direction > 0) {
                if (col >= cols) break;
            } else {
                if (col < 0) break;
            }
        } while(1);

        // swap error rows and reverse direction for the next scanline
        f_pixel *const temperr = thiserr;
        thiserr = nexterr;
        nexterr = temperr;
        fs_direction = -fs_direction;
    }

    input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped
    nearest_free(n);

    return ok;
}
/* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */
/* Removes histogram entries that are (nearly) identical to one of the fixed
   colors, since fixed colors are always added to the palette anyway.
   Tolerance scales with target_mse but has a small floor so exact duplicates
   always go. Removal swaps in the last entry and shrinks the histogram, so the
   same index is re-examined (j-- compensates for the loop's j++). */
LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse)
{
    const float max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f);
    if (fixed_colors_count) {
        for(int j=0; j < hist->size; j++) {
            for(unsigned int i=0; i < fixed_colors_count; i++) {
                if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) {
                    hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry
                    j--; break; // continue searching histogram
                }
            }
        }
    }
}
/* Adds explicit (color, count) entries to a histogram, bypassing image
   scanning. Rejects the call if ignorebits-quantized image colors were already
   added, since exact entries can't be mixed with quantized ones. A gamma of 0
   selects the default 1/2.2 (0.45455). Returns LIQ_OK or an error code;
   on LIQ_OUT_OF_MEMORY the histogram may hold a partial batch. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma)
{
    if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER;
    if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
    if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE;

    if (input_hist->ignorebits > 0 && input_hist->had_image_added) {
        return LIQ_UNSUPPORTED;
    }
    input_hist->ignorebits = 0; // explicit entries are exact; disable color quantization

    input_hist->had_image_added = true;
    input_hist->gamma = gamma ? gamma : 0.45455; // 0 means "use default gamma"

    if (!input_hist->acht) {
        // Fake surface area (num_entries²) is only a hash-size estimate.
        // Multiply as unsigned: num_entries may be up to 2^30 and a signed
        // int*int overflow would be undefined behavior; unsigned wraparound
        // is defined and yields the same bit pattern.
        input_hist->acht = pam_allocacolorhash(~0, (unsigned int)num_entries * (unsigned int)num_entries, 0, options->malloc, options->free);
        if (!input_hist->acht) {
            return LIQ_OUT_OF_MEMORY;
        }
    }
    // Fake image size. It's only for hash size estimates.
    if (!input_hist->acht->cols) {
        input_hist->acht->cols = num_entries;
    }
    input_hist->acht->rows += num_entries;

    const unsigned int hash_size = input_hist->acht->hash_size;
    for(int i=0; i < num_entries; i++) {
        const rgba_pixel rgba = {
            .r = entries[i].color.r,
            .g = entries[i].color.g,
            .b = entries[i].color.b,
            .a = entries[i].color.a,
        };
        union rgba_as_int px = {rgba};
        unsigned int hash;
        if (px.rgba.a) {
            hash = px.l % hash_size;
        } else {
            // all fully-transparent colors are collapsed into one canonical entry
            hash=0; px.l=0;
        }
        if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) {
            return LIQ_OUT_OF_MEMORY;
        }
    }

    return LIQ_OK;
}
/* Scans an image into the histogram. If the color count exceeds the hash's
   capacity, ignorebits is bumped (coarser color quantization) and the whole
   scan restarts from scratch — unless an image was already added, in which
   case the limit is lifted instead (restarting would lose prior data).
   Also registers the image's fixed colors and builds contrast maps if needed. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;

    const unsigned int cols = input_image->width, rows = input_image->height;

    if (!input_image->importance_map && options->use_contrast_maps) {
        contrast_maps(input_image); // best-effort; may silently skip on OOM or tiny images
    }

    input_hist->gamma = input_image->gamma;

    for(int i = 0; i < input_image->fixed_colors_count; i++) {
        liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]);
        if (res != LIQ_OK) {
            return res;
        }
    }

    /*
     ** Step 2: attempt to make a histogram of the colors, unclustered.
     ** If at first we don't succeed, increase ignorebits to increase color
     ** coherence and try again.
     */

    if (liq_progress(options, options->progress_stage1 * 0.4f)) {
        return LIQ_ABORTED;
    }

    const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image);

    // Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not
    // the first image added
    const unsigned int max_histogram_entries = input_hist->had_image_added ? ~0 : options->max_histogram_entries;

    do {
        if (!input_hist->acht) {
            input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free);
        }
        if (!input_hist->acht) return LIQ_OUT_OF_MEMORY;

        // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
        // noise map does not include edges to avoid ruining anti-aliasing
        for(unsigned int row=0; row < rows; row++) {
            bool added_ok;
            if (all_rows_at_once) {
                // whole image in one call; `break` exits the row loop, outer loop ends too
                added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map);
                if (added_ok) break;
            } else {
                const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
                added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL);
            }
            if (!added_ok) {
                // too many colors: coarsen quantization, drop the hash, and retry from scratch
                input_hist->ignorebits++;
                liq_verbose_printf(options, "  too many colors! Scaling colors to improve clustering... %d", input_hist->ignorebits);
                pam_freeacolorhash(input_hist->acht);
                input_hist->acht = NULL;
                if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
                break;
            }
        }
    } while(!input_hist->acht);

    input_hist->had_image_added = true;

    liq_image_free_importance_map(input_image);

    if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now can free the RGBA source if copy has been made in f_pixels
    }

    return LIQ_OK;
}
/* Converts the accumulated color hash into a flat histogram, removes entries
   duplicating fixed colors, and hands ownership of *hist_output to the caller.
   Consumes (frees) the hash either way; a later call without new image data
   will therefore return LIQ_BITMAP_NOT_AVAILABLE. */
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
    if (liq_progress(options, options->progress_stage1 * 0.9f)) {
        return LIQ_ABORTED;
    }

    if (!input_hist->acht) {
        return LIQ_BITMAP_NOT_AVAILABLE;
    }

    histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
    pam_freeacolorhash(input_hist->acht);
    input_hist->acht = NULL; // hash is consumed; histogram can't be finalized twice

    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }
    liq_verbose_printf(options, "  made histogram...%d colors found", hist->size);
    remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);

    *hist_output = hist;
    return LIQ_OK;
}
/* Adjusts alpha in one row of pixels, in place, for the IE6 workaround:
   pixels already close to opaque are pushed toward full opacity so IE6
   (which renders any partial transparency as fully transparent) shows them. */
LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even slightest transparency completely transparent,
       thus to improve situation in IE, make colors that are less than ~10% transparent
       completely opaque */

    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f; // same threshold in byte units

    for(unsigned int col = 0; col < input_image->width; col++) {
        const rgba_pixel px = row_pixels[col];

        /* ie bug: to avoid visible step caused by forced opaqueness, linearly raise opaqueness of almost-opaque colors */
        if (px.a >= almost_opaque_val_int) {
            float al = px.a / 255.f;
            al = almost_opaque_val + (al-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
            al *= 256.f;
            row_pixels[col].a = al >= 255.f ? 255 : al; // clamp to byte range
        }
    }
}
/**
Builds two maps:
importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
edges - noise map including all edges
*/
/* Builds image->importance_map (flat=high, noisy=low; excludes straight edges)
   and image->edges (noise map including all edges) from second-derivative
   contrast. Best-effort: silently returns without maps for tiny images, very
   large images, or on allocation failure. Reuses existing buffers if present. */
LIQ_NONNULL static void contrast_maps(liq_image *image)
{
    const unsigned int cols = image->width, rows = image->height;
    // too small to measure contrast, or 3 full-size byte maps would exceed the memory cap
    if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
        return;
    }

    unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows);
    image->importance_map = NULL; // detach so the error path below owns the buffer
    unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows);
    image->edges = NULL;

    unsigned char *restrict tmp = image->malloc(cols*rows);

    if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) {
        // NOTE(review): relies on image->free accepting NULL like free() — confirm
        image->free(noise);
        image->free(edges);
        image->free(tmp);
        return;
    }

    const f_pixel *curr_row, *prev_row, *next_row;
    curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);

    for (unsigned int j=0; j < rows; j++) {
        prev_row = curr_row;
        curr_row = next_row;
        next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); // edge rows are clamped

        f_pixel prev, curr = curr_row[0], next=curr;
        for (unsigned int i=0; i < cols; i++) {
            prev=curr;
            curr=next;
            next = curr_row[MIN(cols-1,i+1)]; // edge columns are clamped

            // contrast is difference between pixels neighbouring horizontally and vertically
            // (second derivative: |prev + next - 2*curr| per channel)
            const float a = fabsf(prev.a+next.a - curr.a*2.f),
                        r = fabsf(prev.r+next.r - curr.r*2.f),
                        g = fabsf(prev.g+next.g - curr.g*2.f),
                        b = fabsf(prev.b+next.b - curr.b*2.f);

            const f_pixel prevl = prev_row[i];
            const f_pixel nextl = next_row[i];

            const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
                        r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
                        g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
                        b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);

            const float horiz = MAX(MAX(a,r),MAX(g,b));
            const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
            const float edge = MAX(horiz,vert);
            // straight edges score high in only one direction; noise scores high in both
            float z = edge - fabsf(horiz-vert)*.5f;
            z = 1.f - MAX(z,MIN(horiz,vert));
            z *= z; // noise is amplified
            z *= z;

            // 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely).
            const unsigned int z_int = 85 + (unsigned int)(z * 171.f);
            noise[j*cols+i] = MIN(z_int, 255);
            const int e_int = 255 - (int)(edge * 256.f);
            edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0;
        }
    }

    // noise areas are shrunk and then expanded to remove thin edges from the map
    liq_max3(noise, tmp, cols, rows);
    liq_max3(tmp, noise, cols, rows);

    liq_blur(noise, tmp, noise, cols, rows, 3);

    liq_max3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);
    liq_min3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);

    liq_min3(edges, tmp, cols, rows);
    liq_max3(tmp, edges, cols, rows);
    // edges may not exceed noise: importance caps the edge map
    for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);

    image->free(tmp);

    image->importance_map = noise;
    image->edges = edges;
}
/**
* Builds map of neighbor pixels mapped to the same palette entry
*
* For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
* and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
* Correct flood fill doesn't have visually good properties.
*/
/* Refines the edge map into a dither map using the remapped output: runs of
   identical palette indices (checked horizontally, plus one pixel above/below)
   indicate flat areas where dithering helps; short runs indicate detail where
   it should be suppressed. Consumes image->edges (moved to dither_map). */
LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map)
{
    const unsigned int width = input_image->width;
    const unsigned int height = input_image->height;
    unsigned char *const edges = input_image->edges;

    for(unsigned int row=0; row < height; row++) {
        unsigned char lastpixel = row_pointers[row][0];
        unsigned int lastcol=0;

        for(unsigned int col=1; col < width; col++) {
            const unsigned char px = row_pointers[row][col];

            if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) {
                // Transparency may or may not create an edge. When there's an explicit background set, assume no edge.
                continue;
            }

            if (px != lastpixel || col == width-1) {
                // end of a run of identical indices: score it by run length...
                int neighbor_count = 10 * (col-lastcol);
                unsigned int i=lastcol;
                while(i < col) {
                    // ...plus matches directly above/below each run pixel
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 15;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 15;
                    }
                    i++;
                }

                while(lastcol <= col) {
                    // blend prior edge value toward mid-range, damped by how "flat" the run was
                    // (float math truncated back into the byte map)
                    int e = edges[row*width + lastcol];
                    edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
                }
                lastpixel = px;
            }
        }
    }
    // edges buffer is repurposed as the dither map from now on
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}
/**
* Palette can be NULL, in which case it creates a new palette from scratch.
*/
/* Appends user-fixed colors (marked .fixed = true) after the generated palette
   entries, truncating generated colors if needed to stay within max_colors.
   `palette` can be NULL, in which case it creates a new palette from scratch.
   Takes ownership of `palette` (frees it) and returns a fresh colormap.
   Note: the malloc/free parameters deliberately shadow the stdlib names. */
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
    if (!fixed_colors_count) return palette; // nothing to add; keep original as-is

    colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);
    unsigned int i=0;
    if (palette && fixed_colors_count < max_colors) {
        // copy as many generated colors as fit once fixed colors are reserved
        unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
        for(; i < palette_max; i++) {
            newpal->palette[i] = palette->palette[i];
        }
    }
    for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
        newpal->palette[i++] = (colormap_item){
            .acolor = fixed_colors[j],
            .fixed = true, // prevents later K-Means/sorting passes from altering it
        };
    }
    if (palette) pam_freecolormap(palette);
    return newpal;
}
/* K-Means feedback callback: boosts a histogram entry's adjusted weight in
   proportion to how poorly it was matched (diff), so badly represented
   colors count for more in the next mediancut pass. */
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
    const float combined_weight = item->perceptual_weight + item->adjusted_weight;
    item->adjusted_weight = combined_weight * sqrtf(1.f + diff);
}
/**
Repeats mediancut with different histogram weights to find palette with minimum error.
feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
*/
/* Repeats mediancut with adjusted histogram weights, keeping the palette with
   the lowest total error. feedback_loop_trials bounds the search; it is
   reduced for large histograms and decremented faster when a trial regresses.
   On success, stores the best palette's error in *palette_error_p and returns
   the palette (caller owns it); returns NULL on allocation failure. */
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
    unsigned int max_colors = options->max_colors;

    // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
    // at this point actual gamma is not set, so very conservative posterization estimate is used
    const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));

    // larger histograms get fewer trials (each trial is more expensive)
    int feedback_loop_trials = options->feedback_loop_trials;
    if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}

    colormap *acolormap = NULL; // best palette found so far
    double least_error = MAX_DIFF;
    double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;

    const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);

    do {
        colormap *newmap;
        if (hist->size && fixed_colors_count < max_colors) {
            newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2,
                               options->malloc, options->free);
        } else {
            // nothing to generate; the fixed colors alone are the palette
            feedback_loop_trials = 0;
            newmap = NULL;
        }
        newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        if (!newmap) {
            return NULL;
        }

        if (feedback_loop_trials <= 0) {
            return newmap;
        }

        // after palette has been created, total error (MSE) is calculated to keep the best palette
        // at the same time K-Means iteration is done to improve the palette
        // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors

        const bool first_run_of_target_mse = !acolormap && target_mse > 0;
        double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? NULL : adjust_histogram_callback);

        // goal is to increase quality or to reduce number of colors used if quality is good enough
        if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
            if (acolormap) pam_freecolormap(acolormap);
            acolormap = newmap;

            if (total_error < target_mse && total_error > 0) {
                // K-Means iteration improves quality above what mediancut aims for
                // this compensates for it, making mediancut aim for worse
                target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
            }

            least_error = total_error;

            // if number of colors could be reduced, try to keep it that way
            // but allow extra color as a bit of wiggle room in case quality can be improved too
            max_colors = MIN(newmap->colors+1, max_colors);

            feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
        } else {
            // regression: reset weights toward perceptual baseline and spend trials faster
            for(unsigned int j=0; j < hist->size; j++) {
                hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
            }

            target_mse_overshoot = 1.0;
            feedback_loop_trials -= 6;
            // if error is really bad, it's unlikely to improve, so end sooner
            if (total_error > least_error*4) feedback_loop_trials -= 3;
            pam_freecolormap(newmap);
        }

        float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
        if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
        liq_verbose_printf(options, "  selecting colors...%d%%",(int)(100.f * fraction_done));
    }
    while(feedback_loop_trials > 0);

    *palette_error_p = least_error;
    return acolormap;
}
/* Turns every histogram entry directly into one palette color (1:1 mapping),
   using perceptual weight as the entry's popularity. Used when the image has
   few enough colors that no clustering is needed. Returns NULL for an empty
   histogram; caller owns the returned colormap. */
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) {
    if (!hist->size) {
        return NULL;
    }
    colormap *pal = pam_colormap(hist->size, options->malloc, options->free);
    for(unsigned int j = 0; j < hist->size; j++) {
        pal->palette[j].acolor = hist->achv[j].acolor;
        pal->palette[j].popularity = hist->achv[j].perceptual_weight;
    }
    return pal;
}
/* Core quantization driver: builds the best palette for `hist` (skipping
   mediancut entirely when the input already has few enough colors), refines it
   with K-Means until convergence or the iteration budget runs out, enforces
   the max_mse quality gate, sorts the palette, and packages everything into a
   newly allocated liq_result in *result_output. Consumes nothing; caller still
   owns `hist`. Returns LIQ_OK or an error code. */
LIQ_NONNULL static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output)
{
    colormap *acolormap;
    double palette_error = -1;

    assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));

    const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;

    if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;

    // If image has few colors to begin with (and no quality degradation is required)
    // then it's possible to skip quantization entirely
    if (few_input_colors && options->target_mse == 0) {
        acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        palette_error = 0;
    } else {
        const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
        acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
        if (!acolormap) {
            return LIQ_VALUE_OUT_OF_RANGE;
        }

        // K-Means iteration approaches local minimum for the palette
        double iteration_limit = options->kmeans_iteration_limit;
        unsigned int iterations = options->kmeans_iterations;

        if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work

        if (iterations) {
            // likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
            if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
                if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
                    hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
                }
            }

            // large histograms get a reduced iteration budget (and looser convergence limit)
            if (hist->size > 5000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;}

            verbose_print(options, "  moving colormap towards local minimum");

            double previous_palette_error = MAX_DIFF;

            for(unsigned int i=0; i < iterations; i++) {
                palette_error = kmeans_do_iteration(hist, acolormap, NULL);

                if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
                    break; // user abort; keep whatever palette we have
                }

                if (fabs(previous_palette_error-palette_error) < iteration_limit) {
                    break; // converged
                }

                if (palette_error > max_mse*1.5) { // probably hopeless
                    if (palette_error > max_mse*3.0) break; // definitely hopeless
                    i++; // burn iterations twice as fast when it looks hopeless
                }

                previous_palette_error = palette_error;
            }
        }

        if (palette_error > max_mse) {
            liq_verbose_printf(options, "  image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
                               mse_to_standard_mse(palette_error), mse_to_quality(palette_error),
                               mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return LIQ_QUALITY_TOO_LOW;
        }
    }

    if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
        pam_freecolormap(acolormap);
        return LIQ_ABORTED;
    }

    sort_palette(acolormap, options);

    // If palette was created from a multi-image histogram,
    // then it shouldn't be optimized for one image during remapping
    if (fixed_result_colors) {
        for(unsigned int i=0; i < acolormap->colors; i++) {
            acolormap->palette[i].fixed = true;
        }
    }

    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) return LIQ_OUT_OF_MEMORY;
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .use_dither_map = options->use_dither_map,
        .gamma = gamma,
        .min_posterization_output = options->min_posterization_output,
    };
    *result_output = result;
    return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    // Remaps input_image using the palette in result and writes one palette
    // index byte per pixel into the caller-supplied flat buffer.
    // Returns LIQ_BUFFER_TOO_SMALL if buffer can't hold width*height bytes.
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    const size_t required_size = (size_t)input_image->width * (size_t)input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    // Build per-row pointers into the flat buffer and delegate to the
    // row-based entry point.
    LIQ_ARRAY(unsigned char *, rows, input_image->height);
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        // size_t cast: width*i was computed in unsigned int, which can overflow
        // for very large images even though required_size above is size_t
        rows[i] = &buffer_bytes[(size_t)input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    // Remaps input_image to the palette held in quant, writing one palette
    // index per pixel through the caller-supplied row_pointers (one pointer
    // per image row). Optionally applies Floyd-Steinberg dithering depending
    // on quant's dither settings.
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    // A liq_result may be remapped more than once; discard state from any
    // previous remapping run before creating a fresh one.
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }

    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    // Edge/contrast maps are only needed when dithering with a dither map,
    // and only computed once per image.
    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
        return LIQ_ABORTED;
    }

    /*
    ** Step 4: map the colors in the image to their closest match in the
    ** new colormap, and write 'em out.
    */
    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        // No dithering: round the palette to output precision and remap directly.
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
    } else {
        const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000;
        const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map);
        const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
            update_dither_map(input_image, row_pointers, result->palette);
        }

        if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
            return LIQ_ABORTED;
        }

        // remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);

        if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
            return LIQ_ABORTED;
        }
    }

    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }

    return LIQ_OK;
}
// Returns the library version number (LIQ_VERSION) that this binary was
// compiled with, so callers can check header/library agreement at run time.
// (void) gives the definition a proper C prototype; empty parens would
// declare a function with unspecified arguments in pre-C23 C.
LIQ_EXPORT int liq_version(void) {
    return LIQ_VERSION;
}
|
pr79429.c | /* PR c++/79429 */
#pragma omp target /* { dg-error "expected declaration specifiers" } */
|
GB_unop__identity_int32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint8)
// op(A') function: GB (_unop_tran__identity_int32_uint8)
// C type: int32_t
// A type: uint8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): applies the identity unary operator with a
// uint8_t -> int32_t typecast over anz entries, optionally restricted to
// entries present in the bitmap Ab. (Auto-generated; edits belong in
// Generator/, not here.)
GrB_Info GB (_unop_apply__identity_int32_uint8)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            uint8_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint8_t -> int32_t, and apply the
// identity operator. The actual transpose loop is textually included from
// GB_unop_transpose.c, which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transforms a 3x3 int8 convolution kernel into the Winograd F(4,3) domain
// (6x6 tiles) and interleaves it into the pack8-to-1 layout consumed by
// conv3x3s1_winograd43_pack8to1_int8_msa.
static void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_msa(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
    // winograd43 transform kernel
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);

    // G matrix for F(4,3): U = G * g * G^T (values scaled to stay integer)
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h: first stage, G * g (6x3 intermediate)
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U: second stage, (G*g) * G^T (6x6 result)
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 4b-8a-inch/8a-36-outch/4b
    kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4);

    int p = 0;
    // groups of 4 output channels, 8 input channels interleaved together
    for (; p + 3 < outch; p += 4)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];
                    g00[1] = k1.row<const short>(q + i)[k];
                    g00[2] = k2.row<const short>(q + i)[k];
                    g00[3] = k3.row<const short>(q + i)[k];

                    g00 += 4;
                }
            }
        }
    }
    // remaining output channels handled one at a time
    for (; p < outch; p++)
    {
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];

                    g00 += 1;
                }
            }
        }
    }
}
// Winograd F(4,3) int8 3x3 stride-1 convolution using MIPS MSA intrinsics.
// Input is elempack=8 int8; output is elempack=1 int32. Pipeline:
// input transform -> per-tile GEMM ("dot") -> output transform -> crop.
static void conv3x3s1_winograd43_pack8to1_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    //     size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2 (each 6x6 input tile covers a 4x4 output tile with 2-pixel overlap)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);

        //         const float itm[4][4] = {
        //             {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //             {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //             {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //             {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //             {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //             {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        //         };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 =  4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 =  2 * (r01 - r03) + r04 - r02
        // 5 =  4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            short tmp[6][6][8];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;

                    // vertical pass of B^T * d, one 6-row strip at a time;
                    // int8 lanes are sign-extended to int16 via compare/interleave
                    for (int m = 0; m < 6; m++)
                    {
                        v16i8 _r00_01 = __msa_ld_b(r0, 0);
                        v16i8 _r02_03 = __msa_ld_b(r0 + 16, 0);
                        v16i8 _r04_05 = __msa_ld_b(r0 + 32, 0);
                        v16i8 _extr0001 = __msa_clti_s_b(_r00_01, 0);
                        v16i8 _extr0203 = __msa_clti_s_b(_r02_03, 0);
                        v16i8 _extr0405 = __msa_clti_s_b(_r04_05, 0);
                        v8i16 _r00 = (v8i16)__msa_ilvr_b(_extr0001, _r00_01);
                        v8i16 _r01 = (v8i16)__msa_ilvl_b(_extr0001, _r00_01);
                        v8i16 _r02 = (v8i16)__msa_ilvr_b(_extr0203, _r02_03);
                        v8i16 _r03 = (v8i16)__msa_ilvl_b(_extr0203, _r02_03);
                        v8i16 _r04 = (v8i16)__msa_ilvr_b(_extr0405, _r04_05);
                        v8i16 _r05 = (v8i16)__msa_ilvl_b(_extr0405, _r04_05);

                        v8i16 _v5 = __msa_fill_h(5);

                        v8i16 _tmp0m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r00, 2), _r04), __msa_mulv_h(_r02, _v5));
                        v8i16 _tmp1m = __msa_subv_h(__msa_addv_h(_r04, _r03), __msa_slli_h(__msa_addv_h(_r01, _r02), 2));
                        v8i16 _tmp2m = __msa_addv_h(__msa_subv_h(_r04, _r03), __msa_slli_h(__msa_subv_h(_r01, _r02), 2));
                        v8i16 _tmp3m = __msa_subv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
                        v8i16 _tmp4m = __msa_addv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
                        v8i16 _tmp5m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r01, 2), _r05), __msa_mulv_h(_r03, _v5));

                        __msa_st_h(_tmp0m, tmp[0][m], 0);
                        __msa_st_h(_tmp1m, tmp[1][m], 0);
                        __msa_st_h(_tmp2m, tmp[2][m], 0);
                        __msa_st_h(_tmp3m, tmp[3][m], 0);
                        __msa_st_h(_tmp4m, tmp[4][m], 0);
                        __msa_st_h(_tmp5m, tmp[5][m], 0);

                        r0 += w * 8;
                    }

                    short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
                    short* r0_tm_1 = r0_tm_0 + tiles * 8;
                    short* r0_tm_2 = r0_tm_0 + tiles * 16;
                    short* r0_tm_3 = r0_tm_0 + tiles * 24;
                    short* r0_tm_4 = r0_tm_0 + tiles * 32;
                    short* r0_tm_5 = r0_tm_0 + tiles * 40;

                    // horizontal pass: (B^T * d) * B, scattered to the 36 tm rows
                    for (int m = 0; m < 6; m++)
                    {
                        v8i16 _tmp00 = __msa_ld_h(tmp[m][0], 0);
                        v8i16 _tmp01 = __msa_ld_h(tmp[m][1], 0);
                        v8i16 _tmp02 = __msa_ld_h(tmp[m][2], 0);
                        v8i16 _tmp03 = __msa_ld_h(tmp[m][3], 0);
                        v8i16 _tmp04 = __msa_ld_h(tmp[m][4], 0);
                        v8i16 _tmp05 = __msa_ld_h(tmp[m][5], 0);

                        v8i16 _v5 = __msa_fill_h(5);

                        v8i16 _r0tm0 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp00, 2), _tmp04), __msa_mulv_h(_tmp02, _v5));
                        v8i16 _r0tm1 = __msa_subv_h(__msa_addv_h(_tmp04, _tmp03), __msa_slli_h(__msa_addv_h(_tmp01, _tmp02), 2));
                        v8i16 _r0tm2 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp03), __msa_slli_h(__msa_subv_h(_tmp01, _tmp02), 2));
                        v8i16 _r0tm3 = __msa_subv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
                        v8i16 _r0tm4 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
                        v8i16 _r0tm5 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp01, 2), _tmp05), __msa_mulv_h(_tmp03, _v5));

                        __msa_st_h(_r0tm0, r0_tm_0, 0);
                        __msa_st_h(_r0tm1, r0_tm_1, 0);
                        __msa_st_h(_r0tm2, r0_tm_2, 0);
                        __msa_st_h(_r0tm3, r0_tm_3, 0);
                        __msa_st_h(_r0tm4, r0_tm_4, 0);
                        __msa_st_h(_r0tm5, r0_tm_5, 0);

                        r0_tm_0 += tiles * 48;
                        r0_tm_1 += tiles * 48;
                        r0_tm_2 += tiles * 48;
                        r0_tm_3 += tiles * 48;
                        r0_tm_4 += tiles * 48;
                        r0_tm_5 += tiles * 48;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        //         bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        // reorder tiles into pairs so the GEMM below reads contiguously
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 1 < tiles; i += 2)
            {
                short* tmpptr = tm2.row<short>(i / 2);

                const short* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    v8i16 _r0 = __msa_ld_h(r0, 0);
                    v8i16 _r1 = __msa_ld_h(r0 + 8, 0);
                    __msa_st_h(_r0, tmpptr, 0);
                    __msa_st_h(_r1, tmpptr + 8, 0);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 16;
                }
            }
            for (; i < tiles; i++)
            {
                short* tmpptr = tm2.row<short>(i / 2 + i % 2);

                const short* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    v8i16 _r0 = __msa_ld_h(r0, 0);
                    __msa_st_h(_r0, tmpptr, 0);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 8;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;

        // 4 output channels at a time, 2 tiles at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            int* output0_tm = top_blob_tm.channel(p);
            int* output1_tm = top_blob_tm.channel(p + 1);
            int* output2_tm = top_blob_tm.channel(p + 2);
            int* output3_tm = top_blob_tm.channel(p + 3);

            const Mat kernel0_tm = kernel_tm.channel(p / 4);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 1 < tiles; i += 2)
                {
                    const short* r0 = bb2.row<const short>(i / 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    int nn = inch; // inch always > 0

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);
                    v4i32 _sum2 = __msa_fill_w(0);
                    v4i32 _sum3 = __msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 64);
                        __builtin_prefetch(k0 + 128);
                        // sign-extend int16 weights to int32 lanes
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
                        v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
                        v8i16 _w3 = __msa_ld_h(k0 + 24, 0);

                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
                        v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
                        v8i16 _extw3 = __msa_clti_s_h(_w3, 0);

                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
                        v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
                        v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
                        v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
                        v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
                        v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);

                        v4i32 _val0_0 = __msa_fill_w(r0[0]);
                        v4i32 _val0_1 = __msa_fill_w(r0[1]);
                        v4i32 _val0_2 = __msa_fill_w(r0[2]);
                        v4i32 _val0_3 = __msa_fill_w(r0[3]);
                        v4i32 _val0_4 = __msa_fill_w(r0[4]);
                        v4i32 _val0_5 = __msa_fill_w(r0[5]);
                        v4i32 _val0_6 = __msa_fill_w(r0[6]);
                        v4i32 _val0_7 = __msa_fill_w(r0[7]);
                        v4i32 _val1_0 = __msa_fill_w(r0[8]);
                        v4i32 _val1_1 = __msa_fill_w(r0[9]);
                        v4i32 _val1_2 = __msa_fill_w(r0[10]);
                        v4i32 _val1_3 = __msa_fill_w(r0[11]);
                        v4i32 _val1_4 = __msa_fill_w(r0[12]);
                        v4i32 _val1_5 = __msa_fill_w(r0[13]);
                        v4i32 _val1_6 = __msa_fill_w(r0[14]);
                        v4i32 _val1_7 = __msa_fill_w(r0[15]);

                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0_0);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val0_1);
                        _sum2 = __msa_maddv_w(_sum2, _w0l, _val1_0);
                        _sum3 = __msa_maddv_w(_sum3, _w0h, _val1_1);
                        _sum0 = __msa_maddv_w(_sum0, _w1l, _val0_2);
                        _sum1 = __msa_maddv_w(_sum1, _w1h, _val0_3);
                        _sum2 = __msa_maddv_w(_sum2, _w1l, _val1_2);
                        _sum3 = __msa_maddv_w(_sum3, _w1h, _val1_3);
                        _sum0 = __msa_maddv_w(_sum0, _w2l, _val0_4);
                        _sum1 = __msa_maddv_w(_sum1, _w2h, _val0_5);
                        _sum2 = __msa_maddv_w(_sum2, _w2l, _val1_4);
                        _sum3 = __msa_maddv_w(_sum3, _w2h, _val1_5);
                        _sum0 = __msa_maddv_w(_sum0, _w3l, _val0_6);
                        _sum1 = __msa_maddv_w(_sum1, _w3h, _val0_7);
                        _sum2 = __msa_maddv_w(_sum2, _w3l, _val1_6);
                        _sum3 = __msa_maddv_w(_sum3, _w3h, _val1_7);

                        r0 += 16;
                        k0 += 32;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);
                    _sum2 = __msa_addv_w(_sum2, _sum3);

                    int sum[8];
                    __msa_st_w(_sum0, sum, 0);
                    __msa_st_w(_sum2, sum + 4, 0);

                    output0_tm[0] = sum[0];
                    output1_tm[0] = sum[1];
                    output2_tm[0] = sum[2];
                    output3_tm[0] = sum[3];
                    output0_tm[1] = sum[4];
                    output1_tm[1] = sum[5];
                    output2_tm[1] = sum[6];
                    output3_tm[1] = sum[7];
                    output0_tm += 2;
                    output1_tm += 2;
                    output2_tm += 2;
                    output3_tm += 2;
                }
                for (; i < tiles; i++)
                {
                    const short* r0 = bb2.row<const short>(i / 2 + i % 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    int nn = inch; // inch always > 0

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 128);
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
                        v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
                        v8i16 _w3 = __msa_ld_h(k0 + 24, 0);

                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
                        v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
                        v8i16 _extw3 = __msa_clti_s_h(_w3, 0);

                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
                        v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
                        v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
                        v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
                        v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
                        v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);

                        v4i32 _val0 = __msa_fill_w(r0[0]);
                        v4i32 _val1 = __msa_fill_w(r0[1]);
                        v4i32 _val2 = __msa_fill_w(r0[2]);
                        v4i32 _val3 = __msa_fill_w(r0[3]);
                        v4i32 _val4 = __msa_fill_w(r0[4]);
                        v4i32 _val5 = __msa_fill_w(r0[5]);
                        v4i32 _val6 = __msa_fill_w(r0[6]);
                        v4i32 _val7 = __msa_fill_w(r0[7]);

                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val1);
                        _sum0 = __msa_maddv_w(_sum0, _w1l, _val2);
                        _sum1 = __msa_maddv_w(_sum1, _w1h, _val3);
                        _sum0 = __msa_maddv_w(_sum0, _w2l, _val4);
                        _sum1 = __msa_maddv_w(_sum1, _w2h, _val5);
                        _sum0 = __msa_maddv_w(_sum0, _w3l, _val6);
                        _sum1 = __msa_maddv_w(_sum1, _w3h, _val7);

                        r0 += 8;
                        k0 += 32;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);

                    int sum[4];
                    __msa_st_w(_sum0, sum, 0);

                    output0_tm[0] = sum[0];
                    output1_tm[0] = sum[1];
                    output2_tm[0] = sum[2];
                    output3_tm[0] = sum[3];
                    output0_tm += 1;
                    output1_tm += 1;
                    output2_tm += 1;
                    output3_tm += 1;
                }
            }
        }

        remain_outch_start += nn_outch << 2;

        // leftover output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            int* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 1 < tiles; i += 2)
                {
                    const short* r0 = bb2.row<const short>(i / 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);
                    v4i32 _sum2 = __msa_fill_w(0);
                    v4i32 _sum3 = __msa_fill_w(0);

                    for (int q = 0; q < inch; q++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 64);
                        v8i16 _val0 = __msa_ld_h(r0, 0);
                        v8i16 _val1 = __msa_ld_h(r0 + 8, 0);
                        v8i16 _extval0 = __msa_clti_s_h(_val0, 0);
                        v8i16 _extval1 = __msa_clti_s_h(_val1, 0);
                        v4i32 _val0l = (v4i32)__msa_ilvr_h(_extval0, _val0);
                        v4i32 _val0h = (v4i32)__msa_ilvl_h(_extval0, _val0);
                        v4i32 _val1l = (v4i32)__msa_ilvr_h(_extval1, _val1);
                        v4i32 _val1h = (v4i32)__msa_ilvl_h(_extval1, _val1);

                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);

                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0l);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val0h);
                        _sum2 = __msa_maddv_w(_sum2, _w0l, _val1l);
                        _sum3 = __msa_maddv_w(_sum3, _w0h, _val1h);

                        k0 += 8;
                        r0 += 16;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);
                    _sum2 = __msa_addv_w(_sum2, _sum3);

                    output0_tm[0] = __msa_reduce_add_w(_sum0);
                    output0_tm[1] = __msa_reduce_add_w(_sum2);
                    output0_tm += 2;
                }
                for (; i < tiles; i++)
                {
                    const short* r0 = bb2.row<const short>(i / 2 + i % 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);

                    for (int q = 0; q < inch; q++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 32);
                        v8i16 _val = __msa_ld_h(r0, 0);
                        v8i16 _extval = __msa_clti_s_h(_val, 0);
                        v4i32 _vall = (v4i32)__msa_ilvr_h(_extval, _val);
                        v4i32 _valh = (v4i32)__msa_ilvl_h(_extval, _val);

                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);

                        _sum0 = __msa_maddv_w(_sum0, _w0l, _vall);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _valh);

                        k0 += 8;
                        r0 += 8;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);

                    output0_tm[0] = __msa_reduce_add_w(_sum0);
                    output0_tm++;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
    }
    {
        //         const float otm[4][6] = {
        //             {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //             {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        //         };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 =       (r01 - r02) + (r03 - r04) * 2
        // 2 =       (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            int tmp[4][6];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    //                     top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);

                    const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1;
                    const int* output0_tm_1 = output0_tm_0 + tiles * 1;
                    const int* output0_tm_2 = output0_tm_0 + tiles * 2;
                    const int* output0_tm_3 = output0_tm_0 + tiles * 3;
                    const int* output0_tm_4 = output0_tm_0 + tiles * 4;
                    const int* output0_tm_5 = output0_tm_0 + tiles * 5;

                    int* output0 = out0.row<int>(i * 4) + j * 4;

                    // 0 = r00 + (r01 + r02) + (r03 + r04)
                    // 1 =       (r01 - r02) + (r03 - r04) * 2
                    // 2 =       (r01 + r02) + (r03 + r04) * 4
                    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

                    // TODO msa optimize
                    // vertical pass of A^T, columns 0..4 (column 5 below is
                    // pre-scaled by 4 to keep the integer Winograd scaling)
                    for (int m = 0; m < 5; m++)
                    {
                        int tmp02a = output0_tm_1[0] + output0_tm_2[0];
                        int tmp13a = output0_tm_1[0] - output0_tm_2[0];

                        int tmp02b = output0_tm_3[0] + output0_tm_4[0];
                        int tmp13b = output0_tm_3[0] - output0_tm_4[0];

                        tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b;
                        tmp[1][m] = tmp13a + tmp13b * 2;
                        tmp[2][m] = tmp02a + tmp02b * 4;
                        tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8;

                        output0_tm_0 += tiles * 6;
                        output0_tm_1 += tiles * 6;
                        output0_tm_2 += tiles * 6;
                        output0_tm_3 += tiles * 6;
                        output0_tm_4 += tiles * 6;
                        output0_tm_5 += tiles * 6;
                    }
                    for (int m = 5; m < 6; m++)
                    {
                        int tmp02a = output0_tm_1[0] + output0_tm_2[0];
                        int tmp13a = output0_tm_1[0] - output0_tm_2[0];

                        int tmp02b = output0_tm_3[0] + output0_tm_4[0];
                        int tmp13b = output0_tm_3[0] - output0_tm_4[0];

                        tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
                        tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
                        tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
                        tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;

                        output0_tm_0 += tiles * 6;
                        output0_tm_1 += tiles * 6;
                        output0_tm_2 += tiles * 6;
                        output0_tm_3 += tiles * 6;
                        output0_tm_4 += tiles * 6;
                        output0_tm_5 += tiles * 6;
                    }

                    // horizontal pass; /576 removes the 24*24 scaling of the
                    // integer transform matrices
                    for (int m = 0; m < 4; m++)
                    {
                        const int* tmp0 = tmp[m];

                        int tmp02a = tmp0[1] + tmp0[2];
                        int tmp13a = tmp0[1] - tmp0[2];

                        int tmp02b = tmp0[3] + tmp0[4];
                        int tmp13b = tmp0[3] - tmp0[4];

                        output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
                        output0[1] = (tmp13a + tmp13b * 2) / 576;
                        output0[2] = (tmp02a + tmp02b * 4) / 576;
                        output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;

                        output0 += outw;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
BatchNormalization.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/BatchNormalization.c"
#else
// Forward pass of batch normalization over dimension 1 (feature planes).
// Training mode: computes per-feature batch mean/invstd, stores them in
// save_mean/save_std, and updates running_mean/running_var with `momentum`.
// Eval mode: normalizes with running_mean/running_var.
// `real`/`accreal` are template-substituted by the TH generic machinery.
void THNN_(BatchNormalization_updateOutput)(
  THNNState *state, THTensor *input, THTensor *output,
  THTensor *weight, THTensor *bias,
  THTensor *running_mean, THTensor *running_var,
  THTensor *save_mean, THTensor *save_std,
  bool train, double momentum, double eps)
{
  THTensor_(resizeAs)(output, input);
  int64_t nInput = THTensor_(size)(input, 1);
  int64_t f;
  ptrdiff_t n = THTensor_(nElement)(input) / nInput;  // elements per feature plane

  // feature planes are independent, so process them in parallel
  #pragma omp parallel for
  for (f = 0; f < nInput; ++f) {
    THTensor *in = THTensor_(newSelect)(input, 1, f);
    THTensor *out = THTensor_(newSelect)(output, 1, f);

    real mean, invstd;

    if (train) {
      // compute mean per input
      accreal sum = 0;
      TH_TENSOR_APPLY(real, in, sum += *in_data;);

      mean = (real) sum / n;
      THTensor_(set1d)(save_mean, f, (real) mean);

      // compute variance per input
      sum = 0;
      TH_TENSOR_APPLY(real, in,
        sum += (*in_data - mean) * (*in_data - mean););

      if (sum == 0 && eps == 0.0) {
        invstd = 0;  // avoid 1/sqrt(0) when the plane is constant and eps is 0
      } else {
        invstd = (real) (1 / sqrt(sum/n + eps));
      }
      THTensor_(set1d)(save_std, f, (real) invstd);

      // update running averages
      THTensor_(set1d)(running_mean, f,
        (real) (momentum * mean + (1 - momentum) * THTensor_(get1d)(running_mean, f)));

      // NOTE(review): Bessel-corrected variance divides by n-1 — assumes
      // n > 1 (more than one element per feature plane); confirm callers
      // never pass a single-element plane.
      accreal unbiased_var = sum / (n - 1);
      THTensor_(set1d)(running_var, f,
        (real) (momentum * unbiased_var + (1 - momentum) * THTensor_(get1d)(running_var, f)));
    } else {
      mean = THTensor_(get1d)(running_mean, f);
      invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
    }

    // compute output: y = (x - mean) * invstd * w + b
    real w = weight ? THTensor_(get1d)(weight, f) : 1;
    real b = bias ? THTensor_(get1d)(bias, f) : 0;

    TH_TENSOR_APPLY2(real, in, real, out,
      *out_data = (real) (((*in_data - mean) * invstd) * w + b););

    THTensor_(free)(out);
    THTensor_(free)(in);
  }
}
// Backward pass of batch normalization. Computes gradInput (if requested)
// and accumulates into gradWeight/gradBias (if requested), scaled by `scale`.
// In training mode it uses the saved batch statistics; in eval mode the
// running statistics.
void THNN_(BatchNormalization_backward)(
  THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput,
  THTensor *gradWeight, THTensor *gradBias, THTensor *weight,
  THTensor *running_mean, THTensor *running_var,
  THTensor *save_mean, THTensor *save_std,
  bool train, double scale, double eps)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  int64_t nInput = THTensor_(size)(input, 1);
  int64_t f;
  ptrdiff_t n = THTensor_(nElement)(input) / nInput;  // elements per feature plane

  if (gradInput) {
    THTensor_(resizeAs)(gradInput, input);
  }

  // feature planes are independent, so process them in parallel
  #pragma omp parallel for
  for (f = 0; f < nInput; ++f) {
    THTensor *in = THTensor_(newSelect)(input, 1, f);
    THTensor *gradOut = THTensor_(newSelect)(gradOutput, 1, f);
    real w = weight ? THTensor_(get1d)(weight, f) : 1;
    real mean, invstd;
    if (train) {
      mean = THTensor_(get1d)(save_mean, f);
      invstd = THTensor_(get1d)(save_std, f);
    } else {
      mean = THTensor_(get1d)(running_mean, f);
      invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
    }

    // sum over all gradOutput in feature plane
    accreal sum = 0;
    TH_TENSOR_APPLY(real, gradOut, sum += *gradOut_data;);

    // dot product of Q(X) (zero-centered input) and gradOutput
    accreal dotp = 0;
    TH_TENSOR_APPLY2(real, in, real, gradOut,
      dotp += (*in_data - mean) * (*gradOut_data););

    if (gradInput) {
      THTensor *gradIn = THTensor_(newSelect)(gradInput, 1, f);

      if (train) {
        // when in training mode
        // Q(X) = X - E[x] ; i.e. input centered to zero mean
        // Y = Q(X) / σ    ; i.e. BN output before weight and bias
        // dL/dX = (Q(dL/dY) - dot(Y, dL/dY) * Y) / σ * w

        // projection of gradOutput on to output scaled by std
        real k = (real) dotp * invstd * invstd / n;
        TH_TENSOR_APPLY2(real, gradIn, real, in,
          *gradIn_data = (*in_data - mean) * k;);

        accreal gradMean = sum / n;
        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
          *gradIn_data = (*gradOut_data - gradMean - *gradIn_data) * invstd * w;);

      } else {
        // when in evaluation mode
        // Q(X) = X - running_mean  ; i.e. input centered to zero mean
        // Y = Q(X) / running_std   ; i.e. BN output before weight and bias
        // dL/dX = w / running_std
        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
          *gradIn_data = *gradOut_data * invstd * w;);
      }

      THTensor_(free)(gradIn);
    }

    if (gradWeight) {
      real val = THTensor_(get1d)(gradWeight, f);
      THTensor_(set1d)(gradWeight, f, val + scale * dotp * invstd);
    }

    if (gradBias) {
      real val = THTensor_(get1d)(gradBias, f);
      THTensor_(set1d)(gradBias, f, val + scale * sum);
    }

    THTensor_(free)(gradOut);
    THTensor_(free)(in);
  }
}
#endif
|
mandel-omp-for-row.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return the current wall-clock time in microseconds (gettimeofday based). */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * 1e6 + (double)now.tv_usec;
}
/* Timing helpers: both expect a 'double stamp;' in the caller's scope.
 * START_COUNT_TIME records the start time; STOP_COUNT_TIME(_m) converts the
 * elapsed time to seconds and prints it prefixed with the message _m. */
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
/* File-scope loop indices (kept global per the original program layout). */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag; /* real and imaginary components */
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/*
 * Compute the Mandelbrot escape-time value for every point of a
 * height x width grid covering the region whose lower-left corner is
 * (real_min, imag_min), with per-pixel steps scale_real/scale_imag.
 * Each point is iterated up to maxiter times or until |z|^2 >= N*N.
 * Rows are distributed across OpenMP threads with schedule(runtime).
 * Depending on _DISPLAY_, the escape count k is either drawn on an X11
 * window (serialized with a critical section) or stored in output[row][col].
 */
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
    /* FIX: loop indices are now local to the loops instead of the file-scope
     * row/col globals.  The original relied on the private(row,col) clause
     * for correctness; locally-scoped indices remove that hidden coupling. */
    #pragma omp parallel for schedule(runtime)
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            complex z, c;
            z.real = z.imag = 0;
            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            /* height-1-row so the y axis displays with larger values at top */
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
            /* Scale color and display point; X11 is not thread-safe here,
             * so drawing is serialized. */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                #pragma omp critical
                {
                    XSetForeground (display, gc, color);
                    XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col] = k;
#endif
        }
    }
}
/*
 * Program entry point: parses command-line options, allocates the output
 * image (or initializes the X11 display), computes the Mandelbrot set,
 * reports timing, and optionally writes (-o, file build) or draws the result.
 * Fixes vs. the original: option values are bounds-checked against argc,
 * malloc results are checked, the output file is closed, the image is freed,
 * and the non-display build has an explicit return value.
 */
int main(int argc, char *argv[]) {
    int maxiter = 1000;              /* maximum iterations per point */
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;             /* dimensions of display window */
    int height = NPIXELS;
    double size = N, x0 = 0, y0 = 0; /* square of side 2*size centered at (x0,y0) */
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int **output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments.  Options that take values are only
     * matched when enough arguments remain; otherwise they fall through to
     * the usage message instead of reading past argv (was undefined). */
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-i") == 0 && i + 1 < argc) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w") == 0 && i + 1 < argc) {
            width = atoi(argv[++i]);
            height = width;
        }
        else if (strcmp(argv[i], "-s") == 0 && i + 1 < argc) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o") == 0) {
            if ((fp = fopen("mandel.out", "wb")) == NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c") == 0 && i + 2 < argc) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return =
        setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* Allocate the image, checking every allocation (was unchecked). */
    output = malloc(height * sizeof(int *));
    if (output == NULL) {
        fprintf(stderr, "Out of memory\n");
        return EXIT_FAILURE;
    }
    for (int row = 0; row < height; ++row) {
        output[row] = malloc(width * sizeof(int));
        if (output[row] == NULL) {
            fprintf(stderr, "Out of memory\n");
            return EXIT_FAILURE;
        }
    }
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;
#if _DISPLAY_
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag, maxiter,
               output);
#endif
    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL) {
        for (int row = 0; row < height; ++row) {
            /* cast avoids a signed/unsigned comparison; fwrite returns size_t */
            if (fwrite(output[row], sizeof(int), width, fp) != (size_t) width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
        }
        /* fclose flushes buffered data; check it (stream was leaked before) */
        if (fclose(fp) != 0) {
            fprintf(stderr, "Output file not written correctly\n");
        }
    }
    /* Release the image (was leaked). */
    for (int row = 0; row < height; ++row) {
        free(output[row]);
    }
    free(output);
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
#endif
    return EXIT_SUCCESS;   /* was missing in the non-display build */
}
|
IJMatrix_parcsr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
 *
 * hypre_IJMatrixCreateParCSR
 *
 * Builds the underlying ParCSRMatrix for an IJMatrix, shifting the row and
 * column partitionings so that the global first row/column maps to zero.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm            comm             = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt       *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt       *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_BigInt        first_row        = hypre_IJMatrixGlobalFirstRow(matrix);
   HYPRE_BigInt        first_col        = hypre_IJMatrixGlobalFirstCol(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt        row_starts[2];
   HYPRE_BigInt        col_starts[2];
   HYPRE_Int           i;

   /* Shift the partitionings to be zero-based.  Subtracting a zero
    * first row/col is a no-op, so this single loop is equivalent to the
    * original's separate "shift" and "copy" branches. */
   for (i = 0; i < 2; i++)
   {
      row_starts[i] = row_partitioning[i] - first_row;
      col_starts[i] = col_partitioning[i] - first_col;
   }

   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);
   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetRowSizesParCSR
 *
 * Records the per-row entry counts in the auxiliary matrix's row-space
 * array, creating the auxiliary matrix when it does not exist yet.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix  *matrix,
                                const HYPRE_Int *sizes)
{
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_Int     local_num_rows   = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
   HYPRE_Int     local_num_cols   = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
   HYPRE_Int    *row_space        = NULL;
   HYPRE_Int     i;

   hypre_AuxParCSRMatrix *aux_matrix =
      (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   /* Reuse the existing row-space array when one is present, else allocate. */
   if (aux_matrix)
   {
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < local_num_rows; i++)
   {
      row_space[i] = sizes[i];
   }

   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Total user-declared on-processor elements (device assembly path). */
   {
      HYPRE_Int total = 0;
      for (i = 0; i < local_num_rows; i++)
      {
         total += sizes[i];
      }
      hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = total;
   }
#endif

   return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetDiagOffdSizesParCSR
 * sets diag_i inside the diag part of the ParCSRMatrix
 * and offd_i inside the offd part,
 * requires exact row sizes for diag and offd
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix  *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offd_sizes)
{
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_Int     local_num_rows   = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
   HYPRE_Int     local_num_cols   = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);

   hypre_AuxParCSRMatrix *aux_matrix =
      (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* Allocate the size arrays on first use, then copy the user data in. */
   if (!hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
   {
      hypre_AuxParCSRMatrixDiagSizes(aux_matrix) =
         hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   if (!hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
   {
      hypre_AuxParCSRMatrixOffdSizes(aux_matrix) =
         hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int,
                 local_num_rows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int,
                 local_num_rows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   /* Exact sizes are known, so the row-by-row aux format is not needed. */
   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;

   return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOnProcElmtsParCSR
 *
 * Stores a user-supplied bound on the number of on-processor elements in
 * the auxiliary matrix (only meaningful for the CUDA/HIP assembly path;
 * a no-op otherwise).
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
                                      HYPRE_Int       max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int              local_num_rows, local_num_cols;
   HYPRE_BigInt          *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt          *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* FIX: the original queried the MPI rank into my_id but never used it;
    * the dead hypre_MPI_Comm_rank call and its locals have been removed. */
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
   return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOffProcElmtsParCSR
 *
 * Stores a user-supplied bound on the number of off-processor elements in
 * the auxiliary matrix, creating the auxiliary matrix if needed.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int       max_off_proc_elmts)
{
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int              local_num_rows, local_num_cols;
   HYPRE_BigInt          *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt          *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* FIX: the original queried the MPI rank into my_id but never used it;
    * the dead hypre_MPI_Comm_rank call and its locals have been removed. */
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                  local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
   return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixInitializeParCSR
 *
 * Convenience wrapper: initializes the IJMatrix in the memory location of
 * the current hypre handle.
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   HYPRE_MemoryLocation memory_location = hypre_HandleMemoryLocation(hypre_handle());

   return hypre_IJMatrixInitializeParCSR_v2(matrix, memory_location);
}
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
/* Initializes the ParCSR object and the auxiliary matrix of an IJMatrix,
 * creating either one if it does not exist yet.  The auxiliary matrix is
 * placed on the host exactly when the execution policy for
 * memory_location is host. */
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
HYPRE_MemoryLocation memory_location_aux =
hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
/* Matrix not assembled yet: set up storage and seed the row pointers. */
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
if (!par_matrix)
{
hypre_IJMatrixCreateParCSR(matrix);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
}
HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
HYPRE_Int i;
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_ParCSRMatrixNumCols(par_matrix),
NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);
/* WM: TODO - implement for sycl... is this available for other non-cuda/hip gpu implementations? */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (hypre_GetExecPolicy1(memory_location_aux) == HYPRE_EXEC_HOST)
#endif
{
/* Exact row sizes were supplied: turn them into diag/offd row pointers
 * via prefix sums, then allocate the CSR data arrays. */
if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(diag)[i + 1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(
aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
hypre_CSRMatrixInitialize(diag);
}
if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(offd)[i + 1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(
aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
hypre_CSRMatrixInitialize(offd);
}
}
/* Direct-insertion mode (no aux rows): start each row's fill position
 * at the beginning of that row in diag/offd. */
if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
}
}
}
else if ( memory_location_aux == HYPRE_MEMORY_HOST )
{
/* AB 4/06 - the assemble routine destroys the aux matrix - so we need
to recreate if initialize is called again
*/
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
hypre_ParCSRMatrixNumCols(par_matrix), NULL);
hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
}
return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixGetRowCountsParCSR
 *
 * gets the number of columns for rows specified by the user; rows not
 * owned by this process get a count of 0 (and a warning if print_level).
 *
 *****************************************************************************/
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                            HYPRE_Int       nrows,
                                            HYPRE_BigInt   *rows,
                                            HYPRE_Int      *ncols)
{
   HYPRE_BigInt row_index;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int i, my_id;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_rank(comm, &my_id);

   /* BUG FIX: 'index' used to be a function-scope variable written inside
    * the OpenMP-parallel loop without appearing in the private() clause,
    * which is a data race that can yield wrong counts.  It is now declared
    * inside the loop body and therefore thread-private by construction. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[0] &&
          row_index < row_partitioning[1])
      {
         /* compute local row number */
         HYPRE_Int index = (HYPRE_Int)(row_index - row_partitioning[0]);
         ncols[i] = diag_i[index + 1] - diag_i[index] + offd_i[index + 1] - offd_i[index];
      }
      else
      {
         ncols[i] = 0;
         if (print_level)
         {
            hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
                          row_index, my_id);
         }
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixGetValuesParCSR
 *
 * gets values of an IJMatrix
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
HYPRE_BigInt *rows,
HYPRE_BigInt *cols,
HYPRE_Complex *values)
{
/* Two modes, selected by the sign of nrows:
 *  - nrows < 0: "extract whole rows" — for |nrows| rows, write out every
 *    stored column index and value, updating ncols if a row had fewer
 *    entries than requested.
 *  - nrows >= 0: "look up entries" — for each requested (row, col) pair,
 *    search diag/offd and return the stored value (0.0 if not present).
 * Requires the matrix to be assembled. */
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
hypre_CSRMatrix *diag;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
hypre_CSRMatrix *offd;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_BigInt *col_map_offd;
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_Int i, j, n, ii, indx;
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, row, col_indx, first;
HYPRE_Int row_local, row_size;
HYPRE_Int warning = 0;
HYPRE_Int *counter;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (assemble_flag == 0)
{
hypre_error_in_arg(1);
if (print_level)
{
hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
}
}
/* col_0..col_n is the inclusive global column range of the diag block. */
col_0 = col_starts[0];
col_n = col_starts[1] - 1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
/* NOTE(review): offd_j/offd_data/col_map_offd stay uninitialized when
 * num_procs == 1; the code below presumably relies on offd rows being
 * empty in that case so the offd loops never execute — confirm. */
if (num_procs > 1)
{
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
}
if (nrows < 0)
{
/* Whole-row extraction mode: counter[] tracks the output offset of
 * each row's first entry within cols/values. */
nrows = -nrows;
counter = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
counter[0] = 0;
for (i = 0; i < nrows; i++)
{
counter[i + 1] = counter[i] + ncols[i];
}
indx = 0;
for (i = 0; i < nrows; i++)
{
row = rows[i];
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
row_size = diag_i[row_local + 1] - diag_i[row_local] +
offd_i[row_local + 1] - offd_i[row_local];
/* Caller's total buffer (counter[nrows]) must hold this row too. */
if (counter[i] + row_size > counter[nrows])
{
hypre_error_in_arg(1);
if (print_level)
{
hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
}
}
if (ncols[i] < row_size)
{
warning = 1;
}
/* Copy diag entries (local j shifted back to global), then offd. */
for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++)
{
cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
values[indx++] = diag_data[j];
}
for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++)
{
cols[indx] = col_map_offd[offd_j[j]];
values[indx++] = offd_data[j];
}
counter[i + 1] = indx;
}
else
{
if (print_level)
{
hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
}
}
}
/* If any row had fewer stored entries than requested, rewrite ncols
 * with the actual counts. */
if (warning)
{
for (i = 0; i < nrows; i++)
{
ncols[i] = counter[i + 1] - counter[i];
}
if (print_level)
{
hypre_printf ("Warning! ncols has been changed!\n");
}
}
hypre_TFree(counter, HYPRE_MEMORY_HOST);
}
else
{
/* Entry-lookup mode: linear search of the row in offd or diag. */
indx = 0;
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols[ii];
if (n == 0) /* empty row */
{
continue;
}
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
for (i = 0; i < n; i++)
{
col_indx = cols[indx] - first;
values[indx] = 0.0;
if (col_indx < col_0 || col_indx > col_n)
/* search in offd */
{
for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++)
{
if (col_map_offd[offd_j[j]] == col_indx)
{
values[indx] = offd_data[j];
break;
}
}
}
else /* search in diag */
{
col_indx = col_indx - col_0;
for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++)
{
if (diag_j[j] == (HYPRE_Int)col_indx)
{
values[indx] = diag_data[j];
break;
}
}
}
indx++;
}
}
else
{
if (print_level)
{
hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
}
}
}
}
return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetValuesParCSR
 *
 * sets values in an IJMatrix before assembly,
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
/* Overwrites entries of locally-owned rows.  Three paths:
 *  1. matrix already assembled: only existing entries may be overwritten;
 *     a missing entry is an error.
 *  2. not assembled, need_aux: values go into the per-row aux_j/aux_data
 *     arrays, which grow on demand via a temporary overflow buffer.
 *  3. not assembled, !need_aux: values are written directly into the
 *     (pre-sized) diag/offd CSR arrays.
 * ncols == NULL is treated as one value per row.  Rows not owned by this
 * process are silently skipped. */
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_Int row_local;
//HYPRE_Int row_len;
HYPRE_BigInt col_0, col_n, row;
HYPRE_Int i, ii, j, n, not_found;
//HYPRE_Int col_indx, cnt1;
HYPRE_BigInt **aux_j;
HYPRE_BigInt *local_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex **aux_data;
HYPRE_Complex *local_data;
HYPRE_Complex *tmp_data;
HYPRE_Int diag_space, offd_space;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int tmp_indx, indx;
HYPRE_Int space, size, old_size;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_BigInt first;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* col_0..col_n is the inclusive global column range of the diag block. */
col_0 = col_partitioning[0];
col_n = col_partitioning[1] - 1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
}
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
HYPRE_Int j_offd;
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
/* An assembled matrix cannot grow: reject more values than the
 * row currently stores. */
size = diag_i[row_local + 1] - diag_i[row_local] +
offd_i[row_local + 1] - offd_i[row_local];
if (n > size) /* Should we change this and allow this?
This could be same column index, i.e. only last
value is set, previous ones overwritten. */
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
return hypre_error_flag;
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local + 1];
len_offd = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* Map the global column to a local offd column, then find
 * the matching entry in this row. */
j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
for (j = pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* NOTE(review): this assumes the diagonal entry, when present,
 * is stored first in the row (diag_j[pos_diag] == row_local). */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
/* return -1;*/
return hypre_error_flag;
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j = pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
/* return -1; */
return hypre_error_flag;
}
}
indx++;
}
}
}
}
else
{
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
if (need_aux)
{
/* Row-by-row aux storage: overwrite matching columns, append
 * new ones; overflow beyond the reserved space is staged in
 * tmp_j/tmp_data and the row is reallocated afterwards. */
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
else
{
tmp_j = NULL;
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i = 0; i < n; i++)
{
for (j = 0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size + tmp_indx;
/* Grow the row and append the staged overflow entries. */
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
size + tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size + tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i = 0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_BigInt *big_offd_j;
HYPRE_Int col_j;
/* IndxDiag/IndxOffd track the next free slot of each row. */
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
/* Before assembly, offd columns are kept as global indices in
 * big_offd_j; allocate it lazily. */
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local + 1];
offd_space = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j = offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
else /* insert into diag */
{
col_j = (HYPRE_Int)(cols[indx] - col_0);
for (j = diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == col_j)
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = col_j;
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
}
}
return hypre_error_flag;
}
/******************************************************************************
 *
 * hypre_IJMatrixSetConstantValuesParCSR
 *
 * sets all values in an already assembled IJMatrix to a constant value.
 *
 *****************************************************************************/
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex   value )
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *diag_data  = hypre_CSRMatrixData(diag);
   HYPRE_Complex      *offd_data  = hypre_CSRMatrixData(offd);
   HYPRE_Int           nnz_diag   = hypre_CSRMatrixNumNonzeros(diag);
   HYPRE_Int           nnz_offd   = hypre_CSRMatrixNumNonzeros(offd);
   HYPRE_Int           k;

   /* Fill every stored diag entry, then every stored offd entry. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz_diag; k++)
   {
      diag_data[k] = value;
   }
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz_offd; k++)
   {
      offd_data[k] = value;
   }
}
/* Set all coefficients of an assembled IJMatrix (ParCSR storage) to a
 * constant, dispatching to the device or host implementation depending
 * on where the matrix data lives. It is an error to call this before
 * the matrix has been assembled. */
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex value )
{
   /* Guard: the sparsity pattern must already exist */
   if (!hypre_IJMatrixAssembleFlag(matrix))
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
      return hypre_error_flag;
   }
#endif

   hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
/* Accumulates (adds) coefficients into an IJMatrix of ParCSR type.
 * For each of the nrows rows, ncols[ii] entries starting at
 * row_indexes[ii] in cols/values are added. Locally owned rows are
 * updated in place; rows owned by other processors are stashed in the
 * auxiliary matrix for later exchange at assembly time. If the matrix
 * is already assembled, only entries that exist in the sparsity
 * pattern may be updated -- anything else is an error. */
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_BigInt col_0, col_n;
HYPRE_Int i, ii, j, n, not_found;
HYPRE_BigInt **aux_j;
HYPRE_BigInt *local_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex **aux_data;
HYPRE_Complex *local_data;
HYPRE_Complex *tmp_data;
HYPRE_Int diag_space, offd_space;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int tmp_indx, indx;
HYPRE_Int space, size, old_size;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int offd_indx, diag_indx;
HYPRE_BigInt first;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* [col_0, col_n] is the range of globally-numbered columns that land
   in the local diag block; everything else goes to offd */
col_0 = col_partitioning[0];
col_n = col_partitioning[1] - 1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
/* Case 1: matrix already assembled -- the sparsity pattern is fixed,
   so values can only be added to entries that already exist */
if (hypre_IJMatrixAssembleFlag(matrix))
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
HYPRE_Int j_offd;
/* AB - 4/06 - need to get this object*/
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* locally owned row? */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
/* total number of stored entries available in this row */
size = diag_i[row_local + 1] - diag_i[row_local] +
offd_i[row_local + 1] - offd_i[row_local];
if (n > size) /* Should we change this and allow this?
This could be same column index, i.e. only last
value is set, previous ones overwritten. */
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
return hypre_error_flag;
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local + 1];
len_offd = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* map global column to local offd column index */
j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
/* return -1; */
}
for (j = pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j = pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
return hypre_error_flag;
}
}
indx++;
}
}
/* not my row */
else
{
/* stash the entries in the aux matrix so they can be shipped to
   the owning processor at assembly time */
if (!aux_matrix)
{
size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
/* first off-proc element(s): allocate the stash arrays */
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n, 1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
/* grow the stash if this row's entries do not fit */
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3 * n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
/* AB - 4/6 - the row should be negative to indicate an add */
/* UMY - 12/28/09 - now positive since we eliminated the feature of
setting on other processors */
/* off_proc_i[off_proc_i_indx++] = row; */
/* off_proc_i stores (row, count) pairs; off_proc_j/off_proc_data
   hold the flattened column indices and values */
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i = 0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
= current_num_elmts;
}
}
}
/* not assembled */
else
{
/* Case 2: matrix not yet assembled -- new entries may be created,
   either in flexible aux row storage or directly in the CSR arrays */
aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
for (ii = 0; ii < nrows; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
/* flexible per-row (aux) storage is in use: accumulate into the
   row, spilling new columns into tmp arrays if the row is full */
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
/* worst case: every incoming entry is a new column */
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
else
{
tmp_j = NULL;
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i = 0; i < n; i++)
{
/* linear search for an existing occurrence of this column */
for (j = 0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size + tmp_indx;
/* merge the spilled entries back in after growing the row */
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
size + tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size + tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i = 0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_BigInt *big_offd_j;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
/* before assembly, offd columns are kept as global (big) indices;
   allocate the big-index array lazily */
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local + 1];
offd_space = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j = offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
/* return 1;*/
return hypre_error_flag;
}
}
not_found = 1;
}
else /* insert into diag */
{
HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
for (j = diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == col_j)
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = col_j;
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
/* return 1; */
return hypre_error_flag;
}
}
not_found = 1;
}
indx++;
}
/* remember how far each row has been filled */
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
/* same off-proc stash handling as in the assembled branch */
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n, 1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3 * n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i = 0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
= current_num_elmts;
}
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
/* Frees both the ParCSR matrix object and the auxiliary translator held
 * by the IJMatrix, then nulls the pointers so they cannot dangle. */
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrix    *par = (hypre_ParCSRMatrix *)    hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par);
   hypre_AuxParCSRMatrixDestroy(aux);

   /* Reset pointers to NULL */
   hypre_IJMatrixObject(matrix)     = NULL;
   hypre_IJMatrixTranslator(matrix) = NULL;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixTransposeParCSR
*
* Transposes an IJMatrix of type ParCSRMatrix
*
*****************************************************************************/
/* Computes the transpose of matrix_A and stores it as the object of
 * matrix_AT, releasing any ParCSR object matrix_AT previously held. */
HYPRE_Int
hypre_IJMatrixTransposeParCSR( hypre_IJMatrix *matrix_A,
                               hypre_IJMatrix *matrix_AT )
{
   hypre_ParCSRMatrix *par_A  = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_AT = NULL;

   /* Discard any previous object held by the output matrix */
   if (hypre_IJMatrixObject(matrix_AT))
   {
      hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_AT));
      hypre_IJMatrixObject(matrix_AT) = NULL;
   }

   /* Build AT = A^T (third argument 1 as in the original call) */
   hypre_ParCSRMatrixTranspose(par_A, &par_AT, 1);
   hypre_ParCSRMatrixSetNumNonzeros(par_AT);
   hypre_ParCSRMatrixSetDNumNonzeros(par_AT);
   hypre_MatvecCommPkgCreate(par_AT);
   hypre_IJMatrixObject(matrix_AT) = (void *) par_AT;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixNormParCSR
*
* Computes the Infinity norm of an IJMatrix of type ParCSRMatrix
*
* TODO: Add other norms
*
*****************************************************************************/
/* Computes the infinity norm of the IJMatrix's ParCSR object and
 * returns it through *norm. Only the infinity norm is implemented. */
HYPRE_Int
hypre_IJMatrixNormParCSR( hypre_IJMatrix *matrix,
                          HYPRE_Real *norm )
{
   hypre_ParCSRMatrixInfNorm((hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix), norm);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddParCSR
*
* Performs C = alpha*A + beta*B, where A, B and C are IJMatrices of
* type ParCSRMatrix.
*
*****************************************************************************/
/* Computes C = alpha*A + beta*B for IJMatrices of ParCSR type and
 * stores the result as matrix_C's object, freeing any prior object. */
HYPRE_Int
hypre_IJMatrixAddParCSR( HYPRE_Complex   alpha,
                         hypre_IJMatrix *matrix_A,
                         HYPRE_Complex   beta,
                         hypre_IJMatrix *matrix_B,
                         hypre_IJMatrix *matrix_C )
{
   hypre_ParCSRMatrix *par_A = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_B = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_B);
   hypre_ParCSRMatrix *par_C = NULL;

   /* Release the previous result held by matrix_C, if any */
   if (hypre_IJMatrixObject(matrix_C))
   {
      hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_C));
      hypre_IJMatrixObject(matrix_C) = NULL;
   }

   hypre_ParCSRMatrixAdd(alpha, par_A, beta, par_B, &par_C);
   hypre_ParCSRMatrixSetNumNonzeros(par_C);
   hypre_ParCSRMatrixSetDNumNonzeros(par_C);

   /* Make sure the result has a communication package for matvecs */
   if (!hypre_ParCSRMatrixCommPkg(par_C))
   {
      hypre_MatvecCommPkgCreate(par_C);
   }

   hypre_IJMatrixObject(matrix_C) = (void *) par_C;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_MemoryLocation memory_location,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int i, j, k, in_i;
HYPRE_Int myid;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt global_first_col;
HYPRE_BigInt global_first_row;
HYPRE_Int ex_num_contacts = 0, num_rows = 0;
HYPRE_BigInt range_start, range_end;
HYPRE_Int num_elements;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_BigInt row;
HYPRE_Int num_ranges, row_index = 0;
HYPRE_Int num_recvs;
HYPRE_BigInt upper_bound;
HYPRE_Int counter;
HYPRE_Int num_real_procs;
HYPRE_Int /*current_proc,*/ original_proc_indx;
HYPRE_BigInt *row_list = NULL;
HYPRE_Int *row_list_num_elements = NULL;
HYPRE_Int *a_proc_id = NULL, *orig_order = NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_BigInt *ex_contact_buf = NULL;
HYPRE_Int *recv_starts = NULL;
HYPRE_BigInt *response_buf = NULL;
HYPRE_Int *response_buf_starts = NULL;
HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
HYPRE_Int *argsort_contact_procs = NULL;
HYPRE_Int obj_size_bytes, complex_size;
HYPRE_BigInt big_int_size;
HYPRE_Int tmp_int;
HYPRE_BigInt tmp_big_int;
HYPRE_BigInt *col_ptr;
HYPRE_BigInt *big_int_data = NULL;
HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Complex *col_data_ptr;
HYPRE_Complex *complex_data = NULL;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
if (memory_location == HYPRE_MEMORY_DEVICE)
{
HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts,
HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2 * current_num_elmts,
HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts,
HYPRE_MEMORY_HOST);
HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts,
HYPRE_MEMORY_HOST);
hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_DEVICE);
for (i = 0; i < current_num_elmts; i++)
{
#if defined(HYPRE_DEBUG)
hypre_assert(tmp[i] < hypre_IJMatrixRowPartitioning(matrix)[0] ||
tmp[i] >= hypre_IJMatrixRowPartitioning(matrix)[1]);
hypre_assert(tmp[i] >= global_first_row &&
tmp[i] < global_first_row + hypre_IJMatrixGlobalNumRows(matrix));
hypre_assert(off_proc_j_h[i] >= global_first_col &&
off_proc_j_h[i] < global_first_col + global_num_cols);
#endif
off_proc_i_h[2 * i] = tmp[i];
off_proc_i_h[2 * i + 1] = 1;
}
off_proc_i_indx = current_num_elmts * 2;
off_proc_i = off_proc_i_h;
off_proc_j = off_proc_j_h;
off_proc_data = off_proc_data_h;
hypre_TFree(tmp, HYPRE_MEMORY_HOST);
}
/* call hypre_IJMatrixAddToValuesParCSR directly inside this function
* with one chunk of data */
HYPRE_Int off_proc_nelm_recv_cur = 0;
HYPRE_Int off_proc_nelm_recv_max = 0;
HYPRE_BigInt *off_proc_i_recv = NULL;
HYPRE_BigInt *off_proc_j_recv = NULL;
HYPRE_Complex *off_proc_data_recv = NULL;
HYPRE_BigInt *off_proc_i_recv_d = NULL;
HYPRE_BigInt *off_proc_j_recv_d = NULL;
HYPRE_Complex *off_proc_data_recv_d = NULL;
num_rows = off_proc_i_indx / 2;
/* verify that we have created the assumed partition */
if (hypre_IJMatrixAssumedPart(matrix) == NULL)
{
hypre_IJMatrixCreateAssumedPartition(matrix);
}
apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
/*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
}
apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
/* get the assumed processor id for each row */
if (num_rows > 0 )
{
for (i = 0; i < num_rows; i++)
{
row = off_proc_i[i * 2];
//if (row < 0) row = -row - 1;
row_list[i] = row;
row_list_num_elements[i] = off_proc_i[i * 2 + 1];
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_cols, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows - 1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i = 1; i < num_rows; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through a create a contact list - need to contact assumed
processors and find out who the actual row owner is - we will contact with
a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts + 1, HYPRE_MEMORY_HOST);
ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts * 2, HYPRE_MEMORY_HOST);
counter = 0;
range_end = -1;
for (i = 0; i < num_rows; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0)
{
ex_contact_buf[counter * 2 - 1] = row_list[i - 1];
}
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter * 2;
ex_contact_buf[counter * 2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
&range_start, &range_end);
}
}
/* finish the starts */
ex_contact_vec_starts[counter] = counter * 2;
/* finish the last range */
if (counter > 0)
{
ex_contact_buf[counter * 2 - 1] = row_list[num_rows - 1];
}
/* don't allocate space for responses */
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by a range upper bound */
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges / 2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i = 0; i < num_ranges; i++)
{
upper_bound = response_buf[i * 2 + 1];
counter = 0;
tmp_id = response_buf[i * 2];
/* loop through row_list entries - counting how many are in the range */
while (j < num_rows && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real processor ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int data and HYPRE_Complex data. that we will need to pack
together */
/* first find out how many rows and elements we need to send per proc - so we
can do storage */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
num_elements_total[0] = row_list_num_elements[orig_order[0]];
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i = 1; i < num_rows; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
num_elements_total[counter] += row_list_num_elements[orig_order[i]];
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
num_elements_total[counter] = row_list_num_elements[orig_order[i]];
}
}
}
/* to pack together, we need to use the largest obj. size of
(HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
wasting some storage, but I do not think that it will be a
large amount since this function should not be used on really
large amounts of data anyway*/
big_int_size = sizeof(HYPRE_BigInt);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(big_int_size, complex_size);
/* set up data to be sent to send procs */
/* for each proc, ex_contact_buf contains #rows, row #,
no. elements, col indicies, col data, row #, no. elements, col
indicies, col data, etc. */
/* first calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
ex_contact_vec_starts[0] = -1;
for (i = 0; i < num_real_procs; i++)
{
storage += 1 + 2 * num_rows_per_proc[i] + 2 * num_elements_total[i];
ex_contact_vec_starts[i + 1] = -storage - 1; /* need negative for next loop */
}
hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CTAlloc(char, storage * obj_size_bytes, HYPRE_MEMORY_HOST);
index_ptr = void_contact_buf; /* step through with this index */
/* for each proc: #rows, row #, no. elements,
col indicies, col data, row #, no. elements, col indicies, col data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order, so
cheaper to do this*/
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
for (i = 0; i < num_rows; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
counter = 0; /* index into data arrays */
prev_id = -1;
for (i = 0; i < num_rows; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row list[i] - you loose the negative signs that differentiate
add/set values */
row = off_proc_i[i * 2];
num_elements = row_list_num_elements[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in_i = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in_i < 0)
{
in_i = -in_i - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* add row # */
hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* add number of elements */
hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* now add col indices */
for (j = 0; j < num_elements; j++)
{
tmp_big_int = off_proc_j[counter + j]; /* col number */
hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i ++;
}
/* now add data */
for (j = 0; j < num_elements; j++)
{
tmp_complex = off_proc_data[counter++]; /* value */
hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* increment the indexes to keep track of where we are - we
* adjust below to be actual starts*/
ex_contact_vec_starts[indx] = in_i;
}
/* some clean up */
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
hypre_TFree(row_list, HYPRE_MEMORY_HOST);
hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
for (i = num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i - 1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* first get the integer info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/*estimate inital storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_TAlloc(char, obj_size_bytes * send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 2,
comm, (void **) &response_buf, &response_buf_starts);
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
/* Now we can unpack the send_proc_objects and call set
and add to values functions. We unpack messages in a
deterministic order, using processor rank */
num_recvs = send_proc_obj.length;
argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
for (i = 0; i < num_recvs; i++)
{
argsort_contact_procs[i] = i;
}
/* This sort's the id array, but the original indices are stored in
* argsort_contact_procs */
hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs - 1 );
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
for (i = 0; i < num_recvs; i++)
{
/* Find the current processor in order, and reset recv_data_ptr to that processor's message */
original_proc_indx = argsort_contact_procs[i];
/*current_proc = send_proc_obj.id[i];*/
indx = recv_starts[original_proc_indx];
recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx * obj_size_bytes);
/* get the number of rows for this recv */
hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j = 0; j < num_rows; j++) /* for each row: unpack info */
{
/* row # */
hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* num elements for this row */
hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* col indices */ /* Need to check this again !!!! */
if (big_int_size == obj_size_bytes)
{
col_ptr = (HYPRE_BigInt *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
}
else /* copy data */
{
if (big_int_data_size < num_elements)
{
big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k = 0; k < num_elements; k++)
{
hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_ptr = big_int_data;
}
/* col data */
if (complex_size == obj_size_bytes)
{
col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
}
else /* copy data */
{
if (complex_data_size < num_elements)
{
complex_data =
hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k = 0; k < num_elements; k++)
{
hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST,
HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_data_ptr = complex_data;
}
if (memory_location == HYPRE_MEMORY_HOST)
{
hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr);
}
else
{
HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
if (nelm_new > off_proc_nelm_recv_max)
{
off_proc_nelm_recv_max = nelm_new * 2;
off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max,
HYPRE_MEMORY_HOST);
off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max,
HYPRE_MEMORY_HOST);
off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max,
HYPRE_MEMORY_HOST);
}
HYPRE_Int i;
for (i = 0; i < num_elements; i++)
{
off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
}
hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex,
num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
off_proc_nelm_recv_cur = nelm_new;
}
indx += (num_elements * 2);
}
}
if (memory_location == HYPRE_MEMORY_DEVICE)
{
off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d,
NULL, off_proc_j_recv_d,
off_proc_data_recv_d, "add");
#endif
}
hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
if (big_int_data)
{
hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
}
if (complex_data)
{
hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
}
if (memory_location == HYPRE_MEMORY_DEVICE)
{
hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);
return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
                                HYPRE_Int contact_size,
                                HYPRE_Int contact_proc,
                                void *ro,
                                MPI_Comm comm,
                                void **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   HYPRE_Int   rank;
   HYPRE_Int   slot, elem_offset, new_length;
   HYPRE_Int   unit_size;
   void       *dest;

   hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements     *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

   /* Each packed element occupies the larger of the two value types so that
      big-int indices and complex values share one flat byte buffer. */
   unit_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));

   hypre_MPI_Comm_rank(comm, &rank );

   /* Grow the per-contact bookkeeping arrays (vec_starts and, when present,
      id) once the currently reserved slots are exhausted. */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 20; /* room for 20 more contacts */
      send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                                                 send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if (send_proc_obj->id != NULL)
      {
         send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                            send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   /* Slot for this contact and the element offset where its data starts. */
   slot = send_proc_obj->length;
   elem_offset = send_proc_obj->vec_starts[slot]; /* elements stored so far */
   if (send_proc_obj->id != NULL)
   {
      send_proc_obj->id[slot] = contact_proc;
   }

   /* Enlarge the element buffer if this contact's payload will not fit. */
   if (send_proc_obj->element_storage_length < elem_offset + contact_size)
   {
      new_length = elem_offset + hypre_max(contact_size, 100);
      send_proc_obj->v_elements = hypre_TReAlloc((char*)send_proc_obj->v_elements,
                                                 char, new_length * unit_size, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = new_length;
   }

   /* Append the raw contact data to the element buffer. */
   dest = (void *) ((char *) send_proc_obj->v_elements + elem_offset * unit_size);
   hypre_TMemcpy(dest, p_recv_contact_buf, char, unit_size * contact_size, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_HOST);

   send_proc_obj->vec_starts[slot + 1] = elem_offset + contact_size;
   send_proc_obj->length++;

   /* No response payload — this exchange is a confirmation only. */
   *response_message_size = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
 * hypre_FindProc
 *
 * Binary search over a sorted list of range endpoints. Returns the
 * index p (0 <= p < list_length) such that list[p] <= value < list[p+1],
 * or -1 when value lies outside [list[0], list[list_length]).
 *
 * NOTE(review): the code reads list[list_length], so `list` must hold
 * list_length + 1 entries, sorted ascending — confirm against callers.
 *--------------------------------------------------------------------*/
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int low, high, m;

   low = 0;
   high = list_length;

   /* value outside the covered range */
   if (value >= list[high] || value < list[low])
   {
      return -1;
   }

   /* Invariant: list[low] <= value < list[high] */
   while (low + 1 < high)
   {
      m = (low + high) / 2;
      if (value < list[m])
      {
         high = m;
      }
      else /* value >= list[m]; re-testing the negated condition was redundant */
      {
         low = m;
      }
   }

   return low;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
/* Assembles the IJMatrix: collectively flushes off-processor contributions,
 * moves data from the auxiliary structure into the ParCSR diag/offd parts,
 * builds col_map_offd, and destroys the auxiliary matrix.
 * NOTE(review): contains MPI_Allreduce calls — presumably must be called by
 * all ranks in comm; confirm against the public HYPRE_IJMatrixAssemble docs. */
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *diag_j;
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *diag_data;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_Int i, j, j0;
   HYPRE_Int num_cols_offd;
   HYPRE_Int *diag_pos;           /* per-row position of the diagonal entry in aux_j, or -1 */
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int *rownnz;
   HYPRE_Int *row_length;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_rows;
   HYPRE_Int num_rownnz;
   HYPRE_Int i_diag, i_offd;
   HYPRE_BigInt col_0, col_n;     /* first/last global column owned by this rank */
   HYPRE_Int nnz_offd;
   HYPRE_BigInt *big_offd_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex temp;
   HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int current_num_elmts;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int offd_proc_elmts;
   //HYPRE_Int new_off_proc_i_indx;
   //HYPRE_Int cancel_indx;
   //HYPRE_Int col_indx;
   //HYPRE_Int current_indx;
   //HYPRE_Int current_i;
   //HYPRE_Int row_len;
   HYPRE_Int max_num_threads;
   HYPRE_Int aux_flag, aux_flag_global;

   HYPRE_ANNOTATE_FUNC_BEGIN;

   max_num_threads = hypre_NumThreads();

   /* first find out if anyone has an aux_matrix, and create one if you don't
    * have one, but other procs do */
   aux_flag = 0;
   aux_flag_global = 0;
   if (aux_matrix)
   {
      aux_flag = 1;
   }
   hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   if (aux_flag_global && (!aux_flag))
   {
      /* other ranks have an aux matrix: create an empty one here so the
         collective operations below stay matched across ranks */
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   if (aux_matrix)
   {
      /* first delete all cancelled elements */
      /*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      if (cancel_indx)
      {
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         col_indx = 0;
         current_i = 0;
         current_indx = 0;
         new_off_proc_i_indx = off_proc_i_indx;
         for (i=0; i < off_proc_i_indx; i= i+2)
         {
            row_len = off_proc_i[i+1];
            for (j=0; j < off_proc_i[i+1]; j++)
            {
               if (off_proc_j[col_indx] == -1)
               {
                  col_indx++;
                  row_len--;
                  current_num_elmts--;
               }
               else
               {
                  off_proc_j[current_indx] = off_proc_j[col_indx];
                  off_proc_data[current_indx++] = off_proc_data[col_indx++];
               }
            }
            if (row_len)
            {
               off_proc_i[current_i] = off_proc_i[i];
               off_proc_i[current_i+1] = row_len;
               current_i += 2;
            }
            else
            {
               new_off_proc_i_indx -= 2;
            }
         }
         hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
         hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
      }*/

      /* exchange and apply any values destined for rows owned by other
         ranks; collective because the Allreduce involves every rank */
      off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
      hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (offd_proc_elmts)
      {
         max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         hypre_IJMatrixAssembleOffProcValsParCSR(
            matrix, off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
            HYPRE_MEMORY_HOST,
            off_proc_i, off_proc_j, off_proc_data);
      }
   }

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      hypre_MPI_Comm_size(comm, &num_procs);
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      col_0 = col_partitioning[0];
      col_n = col_partitioning[1] - 1;

      /* move data into ParCSRMatrix if not there already */
      if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         HYPRE_Int *diag_array;   /* per-thread diag entry counts, then prefix sums */
         HYPRE_Int *offd_array;   /* per-thread offd entry counts, then prefix sums */

         /* Update nonzero rows of aux_matrix */
         hypre_AuxParCSRMatrixSetRownnz(aux_matrix);

         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
         row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
         num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(aux_matrix);
         rownnz = hypre_AuxParCSRMatrixRownnz(aux_matrix);

         diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         diag_pos = hypre_TAlloc(HYPRE_Int, num_rownnz, HYPRE_MEMORY_HOST);
         i_diag = i_offd = 0;
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel private(i, j, i_diag, i_offd)
#endif
         {
            HYPRE_BigInt *local_j;
            HYPRE_Complex *local_data;
            HYPRE_Int ii, rest, size, ns, ne;
            HYPRE_Int num_threads, my_thread_num;

            /* split the nonzero rows [ns, ne) as evenly as possible
               across threads; the first `rest` threads take one extra row */
            num_threads = hypre_NumActiveThreads();
            my_thread_num = hypre_GetThreadNum();

            size = num_rownnz / num_threads;
            rest = num_rownnz - size * num_threads;
            if (my_thread_num < rest)
            {
               ns = my_thread_num * (size + 1);
               ne = (my_thread_num + 1) * (size + 1);
            }
            else
            {
               ns = my_thread_num * size + rest;
               ne = (my_thread_num + 1) * size + rest;
            }

            /* pass 1: count diag/offd entries in this thread's rows and
               remember where each row's diagonal entry sits (diag_pos) */
            i_diag = i_offd = 0;
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               diag_pos[i] = -1;
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     i_offd++;
                  }
                  else
                  {
                     i_diag++;
                     if ((HYPRE_Int)(local_j[j] - col_0) == i)
                     {
                        diag_pos[i] = j;
                     }
                  }
               }
            }
            diag_array[my_thread_num] = i_diag;
            offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* thread 0: turn the per-thread counts into inclusive prefix
               sums and (re)allocate the CSR arrays published to all
               threads via diag_j/diag_data/offd_j/offd_data/big_offd_j */
            if (my_thread_num == 0)
            {
               i_diag = 0;
               i_offd = 0;
               for (i = 0; i < num_threads; i++)
               {
                  i_diag += diag_array[i];
                  i_offd += offd_array[i];
                  diag_array[i] = i_diag;
                  offd_array[i] = i_offd;
               }
               diag_i[num_rows] = i_diag;
               offd_i[num_rows] = i_offd;
               hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
            }
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* pass 2: each thread starts writing at the end of the
               previous thread's range (prefix sums computed above) */
            if (my_thread_num)
            {
               i_diag = diag_array[my_thread_num - 1];
               i_offd = offd_array[my_thread_num - 1];
            }
            else
            {
               i_diag = 0;
               i_offd = 0;
            }
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               diag_i[ii] = i_diag;
               offd_i[ii] = i_offd;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               /* place the diagonal entry first in its row, if present */
               if (diag_pos[i] > -1)
               {
                  diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
                  diag_data[i_diag++] = local_data[diag_pos[i]];
               }
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     big_offd_j[i_offd] = local_j[j];
                     offd_data[i_offd++] = local_data[j];
                  }
                  else if (j != diag_pos[i])
                  {
                     diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
                     diag_data[i_diag++] = local_data[j];
                  }
               }
            }

            /* Correct diag_i and offd_i */
            /* (fill in row pointers for the empty rows skipped by rownnz) */
            if (rownnz != NULL)
            {
#ifdef HYPRE_USING_OPENMP
               #pragma omp barrier
#endif
               for (i = ns; i < (ne - 1); i++)
               {
                  for (ii = rownnz[i] + 1; ii < rownnz[i + 1]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[i + 1]];
                     offd_i[ii] = offd_i[rownnz[i + 1]];
                  }
               }

               if (my_thread_num < (num_threads - 1))
               {
                  for (ii = rownnz[ne - 1] + 1; ii < rownnz[ne]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[ne]];
                     offd_i[ii] = offd_i[rownnz[ne]];
                  }
               }
               else
               {
                  for (ii = rownnz[ne - 1] + 1; ii < num_rows; ii++)
                  {
                     diag_i[ii] = diag_i[num_rows];
                     offd_i[ii] = offd_i[num_rows];
                  }
               }
            }
         } /* end parallel region */

         hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
         hypre_TFree(offd_array, HYPRE_MEMORY_HOST);

         hypre_CSRMatrixJ(diag) = diag_j;
         hypre_CSRMatrixData(diag) = diag_data;
         hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
         if (offd_i[num_rows] > 0)
         {
            hypre_CSRMatrixJ(offd) = offd_j;
            hypre_CSRMatrixBigJ(offd) = big_offd_j;
            hypre_CSRMatrixData(offd) = offd_data;
         }
         hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
         hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* data already lives in the ParCSR structure:
            move diagonal element into first space */
         big_offd_j = hypre_CSRMatrixBigJ(offd);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private (i,j,j0,temp)
#endif
         for (i = 0; i < num_rows; i++)
         {
            j0 = diag_i[i];
            for (j = j0; j < diag_i[i + 1]; j++)
            {
               if (diag_j[j] == i)
               {
                  /* swap the diagonal entry to the front of the row */
                  temp = diag_data[j0];
                  diag_data[j0] = diag_data[j];
                  diag_data[j] = temp;
                  diag_j[j] = diag_j[j0];
                  diag_j[j0] = i;
                  break;
               }
            }
         }

         offd_j = hypre_CSRMatrixJ(offd);
         if (!offd_j && offd_i[num_rows])
         {
            offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
            hypre_CSRMatrixJ(offd) = offd_j;
         }
      }

      /* generate col_map_offd: sorted unique global columns of offd,
         then rewrite offd_j as local indices into that map */
      nnz_offd = offd_i[num_rows];
      if (nnz_offd)
      {
         tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < nnz_offd; i++)
         {
            tmp_j[i] = big_offd_j[i];
         }
         hypre_BigQsort0(tmp_j, 0, nnz_offd - 1);
         /* compact duplicates out of the sorted copy */
         num_cols_offd = 1;
         for (i = 0; i < nnz_offd - 1; i++)
         {
            if (tmp_j[i + 1] > tmp_j[i])
            {
               tmp_j[num_cols_offd++] = tmp_j[i + 1];
            }
         }
         col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < num_cols_offd; i++)
         {
            col_map_offd[i] = tmp_j[i];
         }
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(i)
#endif
         for (i = 0; i < nnz_offd; i++)
         {
            offd_j[i] = hypre_BigBinarySearch(col_map_offd, big_offd_j[i], num_cols_offd);
         }

         /* shift the map to zero-based global numbering */
         if (base)
         {
            for (i = 0; i < num_cols_offd; i++)
            {
               col_map_offd[i] -= base;
            }
         }
         hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
         hypre_CSRMatrixNumCols(offd) = num_cols_offd;
         hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
         hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
         hypre_CSRMatrixBigJ(offd) = NULL;
      }

      hypre_IJMatrixAssembleFlag(matrix) = 1;

      /* Generate the nonzero rows in the diag and offd matrices */
      hypre_CSRMatrixSetRownnz(diag);
      hypre_CSRMatrixSetRownnz(offd);
   }

   /* Free memory */
   hypre_AuxParCSRMatrixDestroy(aux_matrix);
   hypre_IJMatrixTranslator(matrix) = NULL;

   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
/* Threaded SetValues: overwrites existing entries of locally owned rows.
 * Requires rows[i] != rows[j] for i != j (each row handled by one thread).
 * Off-processor rows are currently ignored (the cancellation code for them
 * is commented out below). */
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix,
                                  HYPRE_Int nrows,
                                  HYPRE_Int *ncols,
                                  const HYPRE_BigInt *rows,
                                  const HYPRE_Int *row_indexes,
                                  const HYPRE_BigInt *cols,
                                  const HYPRE_Complex *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, first;   /* local column range and global base */
   //HYPRE_Int cancel_indx;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_BigInt *big_offd_j;
   HYPRE_Complex *offd_data;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   //HYPRE_Int off_proc_i_indx;
   //HYPRE_BigInt *off_proc_i;
   //HYPRE_BigInt *off_proc_j;
   //HYPRE_Int *offproc_cnt;

   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   //HYPRE_Int max_num_threads;
   HYPRE_Int error_flag = 0;   /* incremented atomically by threads on error */
   /*HYPRE_Complex *off_proc_data;*/

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   //offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   /* validate argument before touching any data */
   if (nrows < 0)
   {
      hypre_error_in_arg(2);

      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
      return hypre_error_flag;
   }

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      /* sparsity pattern is fixed: only existing entries may be set */
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      /*if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      }*/
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         //HYPRE_Int row_len;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;

         /* split the nrows input rows [ns, ne) across threads; safe only
            because the caller guarantees rows are pairwise distinct */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         len = nrows / num_threads;
         rest = nrows - len * num_threads;

         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               size = diag_i[row_local + 1] - diag_i[row_local]
                      + offd_i[row_local + 1] - offd_i[row_local];

               /* cannot set more entries than the row's fixed pattern holds */
               if (n > size)
               {
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                  #pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
                  /*return hypre_error_flag; */
               }

               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local + 1];
               len_offd = offd_i[row_local + 1];
               not_found = 1;

               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     for (j = pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* assembled diag rows store the diagonal entry first */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag; */
                     }
                     diag_data[pos_diag] = values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j = pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                  }
                  indx++;
               }
            }
            /* processor does not own the row */
            //else /*search for previous occurrences and cancel them */
            /*{
               if (aux_matrix)
               {
                  col_indx = 0;
                  for (i=0; i < off_proc_i_indx; i=i+2)
                  {
                     row_len = off_proc_i[i+1];
                     if (off_proc_i[i] == row)
                     {
                        for (j=0; j < n; j++)
                        {
                           cnt1 = col_indx;
                           for (k=0; k < row_len; k++)
                           {
                              if (off_proc_j[cnt1] == cols[j])
                              {
                                 off_proc_j[cnt1++] = -1;
                                 offproc_cnt[my_thread_num]++; */
            /*cancel_indx++;*/
            /* if no repetition allowed */
            /* off_proc_j[col_indx] = -1;
               col_indx -= k;
               break; */
            /*}
                              else
                              {
                                 cnt1++;
                              }
                           }
                        }
                        col_indx += row_len;
                     }
                     else
                     {
                        col_indx += row_len;
                     }
                  }*/
            /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
            //}
            //}
         }
      } /*end parallel region */
   }
   else /* matrix not assembled */
   {
      /* values go into the auxiliary structure (need_aux) or directly into
         preallocated CSR rows (indices tracked in IndxDiag/IndxOffd) */
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      /*if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      }*/
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            offd_data = hypre_CSRMatrixData(offd);
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            if (!big_offd_j)
            {
               /* global column indices are kept in big_offd_j until assembly */
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;    /* overflow buffer for new columns */
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         //HYPRE_Int row_len;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;

         /* same even row split across threads as in the assembled branch */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         len = nrows / num_threads;
         rest = nrows - len * num_threads;

         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               if (need_aux)
               {
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;

                  /* stage entries that exceed current row space in tmp
                     buffers, then realloc the row once afterwards */
                  if (size < n)
                  {
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i = 0; i < n; i++)
                  {
                     /* overwrite if the column already exists in this row */
                     for (j = 0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }

                  row_length[row_local] = size + tmp_indx;

                  if (tmp_indx)
                  {
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                       size + tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                          HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size + tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }

                  /* append the staged overflow entries */
                  cnt = size;

                  for (i = 0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }

                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;

                  /* current fill positions within the preallocated row */
                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local + 1];
                  offd_space = offd_i[row_local + 1];
                  not_found = 1;

                  for (i = 0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                        /* insert into offd */
                     {
                        for (j = offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j = diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                           {
                              diag_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }

                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* processor does not own the row */
            /*else
            {
               if (aux_matrix)
               {
                  col_indx = 0;
                  for (i=0; i < off_proc_i_indx; i=i+2)
                  {
                     row_len = off_proc_i[i+1];
                     if (off_proc_i[i] == row)
                     {
                        for (j=0; j < n; j++)
                        {
                           cnt1 = col_indx;
                           for (k=0; k < row_len; k++)
                           {
                              if (off_proc_j[cnt1] == cols[j])
                              {
                                 off_proc_j[cnt1++] = -1; */
            /*cancel_indx++;*/
            //offproc_cnt[my_thread_num]++;
            /* if no repetition allowed */
            /* off_proc_j[col_indx] = -1;
               col_indx -= k;
               break; */
            /* }
                              else
                              {
                                 cnt1++;
                              }
                           }
                        }
                        col_indx += row_len;
                     }
                     else
                     {
                        col_indx += row_len;
                     }
                  }*/
            /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
            /*}
            }*/
         }
      } /* end parallel region */
   }

   /*if (error_flag)
   {
      return hypre_error_flag;
   }
   if (aux_matrix)
   {
      for (i1=0; i1 < max_num_threads; i1++)
      {
         cancel_indx += offproc_cnt[i1];
      }
      hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
   }*/

   //hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
/* Thread-parallel AddTo: each thread handles a contiguous slice of the
 * input rows.  Rows owned by this process are updated in place; rows owned
 * by other processes are stashed per thread (offproc_cnt) and appended to
 * the aux matrix off-proc lists serially after the parallel region. */
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int **offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
HYPRE_Int i1;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* one off-process stash per thread, allocated lazily inside the region */
offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
for (i1 = 0; i1 < max_num_threads; i1++)
{
offproc_cnt[i1] = NULL;
}
/* [col_0, col_n] is the global column range owned locally (diag part) */
col_0 = col_partitioning[0];
col_n = col_partitioning[1] - 1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
/* Case 1: sparsity pattern is fixed -- only existing entries may be
 * incremented; any reference to a missing entry is an error. */
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int *my_offproc_cnt = NULL;
/* static partition of the nrows input rows over the active threads:
 * rows [ns, ne) belong to this thread */
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows / num_threads;
rest = nrows - len * num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num * (len + 1);
ne = (my_thread_num + 1) * (len + 1);
}
else
{
ns = my_thread_num * len + rest;
ne = (my_thread_num + 1) * len + rest;
}
for (ii = ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* locally owned row? */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
size = diag_i[row_local + 1] - diag_i[row_local]
+ offd_i[row_local + 1] - offd_i[row_local];
/* cannot add more entries than the assembled row holds */
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local + 1];
len_offd = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
/* map global column to local offd column via col_map_offd */
j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
for (j = pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* assembled diag rows are stored with the diagonal entry first */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j = pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* not my row */
/* need to find solution for threaded version!!!! */
/* could save row number and process later .... */
else
{
/* stash layout: slot 0 = capacity, slot 1 = next free position,
 * then (ii, indx) pairs for the deferred serial pass below */
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i + 2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i + 1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size + 200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i + 1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /* end parallel region */
}
/* Case 2: matrix not yet assembled -- new entries may be created, either
 * in the aux (row-wise) storage or directly in the ParCSR structure. */
/* not assembled */
else
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
/* offd columns are kept as global (big) indices until assembly */
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_Int i, j, ii, n;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
HYPRE_Int *my_offproc_cnt = NULL;
/* static partition of the nrows input rows over the active threads */
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows / num_threads;
rest = nrows - len * num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num * (len + 1);
ne = (my_thread_num + 1) * (len + 1);
}
else
{
ns = my_thread_num * len + rest;
ne = (my_thread_num + 1) * len + rest;
}
for (ii = ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
if (need_aux)
{
/* aux storage: entries that do not fit in the current row
 * allocation are collected in tmp_j/tmp_data first, then the
 * row is reallocated once and the overflow appended */
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i = 0; i < n; i++)
{
/* add to an existing entry if the column is already present */
for (j = 0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size + tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
size + tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size + tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i = 0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local + 1];
offd_space = offd_i[row_local + 1];
not_found = 1;
for (i = 0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j = offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j = diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
/* stash layout: slot 0 = capacity, slot 1 = next free position,
 * then (ii, indx) pairs for the deferred serial pass below */
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i + 2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i + 1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size + 200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i + 1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /*end parallel region */
}
if (error_flag)
{
return hypre_error_flag;
}
/* Serial post-pass: drain each thread's off-process stash into the aux
 * matrix off-proc arrays (grown on demand) for later communication. */
if (!aux_matrix)
{
HYPRE_Int size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
for (i1 = 0; i1 < max_num_threads; i1++)
{
if (offproc_cnt[i1])
{
HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
HYPRE_Int i, i2, ii, n, indx;
HYPRE_BigInt row;
for (i2 = 2; i2 < my_offproc_cnt[1]; i2 += 2)
{
ii = my_offproc_cnt[i2];
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = my_offproc_cnt[i2 + 1];
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
/* first off-proc element ever: allocate initial storage */
max_off_proc_elmts = hypre_max(n, 1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
/* grow off-proc storage to fit this row's n new entries */
max_off_proc_elmts += 3 * n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
/* off_proc_i stores (row, count) pairs for each stashed row */
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i = 0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}
hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
}
}
hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
|
omp50_task_depend_mtx3.c | // RUN: %libomp-compile-and-run
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// UNSUPPORTED: clang-3, clang-4, clang-5, clang-6, clang-7, clang-8
// TODO: update expected result when icc supports mutexinoutset
// XFAIL: icc
// Tests OMP 5.0 task dependences "mutexinoutset", emulates compiler codegen
// Mutually exclusive tasks get same input dependency info array
//
// Task tree created:
// task0 task1
// \ / \
// task2 task5
// / \
// task3 task4
// / \
// task6 <-->task7 (these two are mutually exclusive)
// \ /
// task8
//
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
static int checker = 0; // to check if two tasks run simultaneously
static int err = 0;
#ifndef DELAY
#define DELAY 0.1
#endif
// Body of a "mutexinoutset" task.  Verifies mutual exclusion by bumping a
// shared counter around a sleep: if another mutex task overlaps, the counter
// is observed != 1 and the error count is incremented.
int mutex_task(int task_id) {
  int thread_id = omp_get_thread_num();
#pragma omp atomic
  ++checker;
  printf("task %d, th %d\n", task_id, thread_id);
  // check on entry, before the sleep
  if (checker != 1) {
    err++;
    printf("Error1, checker %d != 1\n", checker);
  }
  my_sleep(DELAY);
  // re-check after the sleep window
  if (checker != 1) {
    err++;
    printf("Error2, checker %d != 1\n", checker);
  }
#pragma omp atomic
  --checker;
  return 0;
}
/* Builds the task tree shown above and returns 0 iff the two
 * mutexinoutset tasks (6 and 7) never ran concurrently. */
int main()
{
/* i1..i4 exist only as dependence objects; their values are never used */
int i1,i2,i3,i4;
omp_set_num_threads(2);
#pragma omp parallel
{
#pragma omp single nowait
{
int t = omp_get_thread_num();
/* task 0: reader of i1, i2 */
#pragma omp task depend(in: i1, i2)
{ int th = omp_get_thread_num();
printf("task 0_%d, th %d\n", t, th);
my_sleep(DELAY); }
/* task 1: reader of i1, i3 */
#pragma omp task depend(in: i1, i3)
{ int th = omp_get_thread_num();
printf("task 1_%d, th %d\n", t, th);
my_sleep(DELAY); }
/* task 2: waits on tasks 0/1 (readers of i1), then writes i1 */
#pragma omp task depend(in: i2) depend(out: i1)
{ int th = omp_get_thread_num();
printf("task 2_%d, th %d\n", t, th);
my_sleep(DELAY); }
/* task 3: reader of i1, runs after task 2 */
#pragma omp task depend(in: i1)
{ int th = omp_get_thread_num();
printf("task 3_%d, th %d\n", t, th);
my_sleep(DELAY); }
/* task 4: writer of i2 */
#pragma omp task depend(out: i2)
{ int th = omp_get_thread_num();
printf("task 4_%d, th %d\n", t, th);
my_sleep(DELAY+0.1); } // wait a bit longer than task 3
/* task 5: writer of i3 */
#pragma omp task depend(out: i3)
{ int th = omp_get_thread_num();
printf("task 5_%d, th %d\n", t, th);
my_sleep(DELAY); }
/* tasks 6 and 7: mutually exclusive via mutexinoutset on i1, i4 */
#pragma omp task depend(mutexinoutset: i1, i4)
{ mutex_task(6); }
#pragma omp task depend(mutexinoutset: i1, i4)
{ mutex_task(7); }
/* task 8: runs after both mutex tasks complete */
#pragma omp task depend(in: i1)
{ int th = omp_get_thread_num();
printf("task 8_%d, th %d\n", t, th);
my_sleep(DELAY); }
} // single
} // parallel
/* err was incremented by mutex_task on any observed overlap */
if (err == 0) {
printf("passed\n");
return 0;
} else {
printf("failed\n");
return 1;
}
}
|
GB_unop__identity_int16_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_uint64
// op(A') function: GB_unop_tran__identity_int16_uint64
// C type: int16_t
// A type: uint64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = (int16_t) Ax: apply the identity op with a uint64_t -> int16_t cast,
 * elementwise over anz entries, parallelized over nthreads.
 * Generated code -- the loop bodies expand the GB_* macros defined above. */
GrB_Info GB_unop_apply__identity_int16_uint64
(
int16_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* dense/sparse case: every entry is present */
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
/* skip entries not present in the bitmap */
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = (int16_t) A': transpose A, casting each entry uint64_t -> int16_t.
 * The actual transpose algorithm lives in the shared template
 * GB_unop_transpose.c, specialized here via the GB_* macros above. */
GrB_Info GB_unop_tran__identity_int16_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Vec.h | /*************************************************************************
* Copyright (c) 2014 Zhang Dongdong
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**************************************************************************/
#ifndef VEC_H
#define VEC_H
/*
Szymon Rusinkiewicz
Princeton University
Vec.h
Class for a constant-length vector
Supports the following operations:
vec v1; // Initialized to (0,0,0)
vec v2(1,2,3); // Initialized to (1,2,3)
vec v3(v2); // Copy constructor
float farray[3];
vec v4 = vec(farray); // Explicit: "v4 = farray" won't work
Vec<3,double> vd; // The "vec" used above is Vec<3,float>
point p1, p2, p3; // Same as vec
v3 = v1 + v2; // Also -, *, / (all componentwise)
v3 = 3.5f * v1; // Also vec * scalar, vec / scalar
// NOTE: scalar has to be the same type:
// it won't work to do double * vec<float>
v1 = min(v2,v3); // Componentwise min/max
v1 = sin(v2); // Componentwise - all the usual functions...
swap(v1,v2); // In-place swap
v3 = v1 DOT v2; // Actually operator^
v3 = v1 CROSS v2; // Actually operator%
float f = v1[0]; // Subscript
float *fp = v1; // Implicit conversion to float *
f = len(v1); // Length (also len2 == squared length)
f = dist(p1, p2); // Distance (also dist2 == squared distance)
normalize(v1); // Normalize (i.e., make it unit length)
// normalize(vec(0,0,0)) => vec(1,0,0)
v1 = trinorm(p1,p2,p3); // Normal of triangle (area-weighted)
cout << v1 << endl; // iostream output in the form (1,2,3)
cin >> v2; // iostream input using the same syntax
Also defines the utility functions sqr, cube, sgn, fract, clamp, mix,
step, smoothstep, faceforward, reflect, and refract
*/
// Windows defines min and max as macros, which prevents us from using the
// type-safe versions from std::, as well as interfering with method defns.
// Also define NOMINMAX, which prevents future bad definitions.
#ifdef min
# undef min
#endif
#ifdef max
# undef max
#endif
#ifndef NOMINMAX
# define NOMINMAX
#endif
#include <cmath>
#include <iostream>
#include <algorithm>
#include <cstddef>
// Let gcc optimize conditional branches a bit better...
#ifndef likely
# if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
# define likely(x) (x)
# define unlikely(x) (x)
# else
# define likely(x) (__builtin_expect((x), 1))
# define unlikely(x) (__builtin_expect((x), 0))
# endif
#endif
// Boost-like compile-time assertion checking
template <bool X> struct VEC_STATIC_ASSERTION_FAILURE;
template <> struct VEC_STATIC_ASSERTION_FAILURE<true>
{ void operator () () {} };
#define VEC_STATIC_CHECK(expr) VEC_STATIC_ASSERTION_FAILURE<bool(expr)>()
// Fixed-dimension vector of D components of type T (default float).
// Components are stored inline; the class adds no overhead over T[D].
template <int D, class T = float>
class Vec {
protected:
T v[D];
public:
// Constructor for no arguments. Everything initialized to 0.
Vec() { for (int i = 0; i < D; i++) v[i] = T(0); }
// Uninitialized constructor - meant mostly for internal use
#define VEC_UNINITIALIZED ((void *) 0)
Vec(void *) {}
// Constructors for 2-4 arguments
Vec(T x, T y)
{ VEC_STATIC_CHECK(D == 2); v[0] = x; v[1] = y; }
Vec(T x, T y, T z)
{ VEC_STATIC_CHECK(D == 3); v[0] = x; v[1] = y; v[2] = z; }
Vec(T x, T y, T z, T w)
{ VEC_STATIC_CHECK(D == 4); v[0] = x; v[1] = y; v[2] = z; v[3] = w; }
// Constructor from anything that can be accessed using []
// Pretty aggressive, so marked as explicit.
template <class S> explicit Vec(const S &x)
{ for (int i = 0; i < D; i++) v[i] = T(x[i]); }
// No destructor or assignment operator needed
// Array reference and conversion to pointer - no bounds checking
const T &operator [] (int i) const
{ return v[i]; }
T &operator [] (int i)
{ return v[i]; }
operator const T * () const
{ return v; }
// NOTE(review): non-const overload also yields a const pointer -- appears
// intentional, so that mutation goes through the T* conversion below
operator const T * ()
{ return v; }
operator T * ()
{ return v; }
// Member operators: componentwise compound assignment.
// Each element update is wrapped in "#pragma omp atomic" so concurrent
// updates to the same Vec from an OpenMP parallel region do not race.
Vec<D,T> &operator += (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] += x[i];
return *this;
}
Vec<D,T> &operator -= (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] -= x[i];
return *this;
}
Vec<D,T> &operator *= (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] *= x[i];
return *this;
}
Vec<D,T> &operator *= (const T &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] *= x;
return *this;
}
Vec<D,T> &operator /= (const Vec<D,T> &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] /= x[i];
return *this;
}
Vec<D,T> &operator /= (const T &x)
{
for (int i = 0; i < D; i++)
#pragma omp atomic
v[i] /= x;
return *this;
}
// Set each component to min/max of this and the other vector
// (guarded by "omp critical" since compare-and-store is not atomic)
Vec<D,T> &min(const Vec<D,T> &x)
{
#pragma omp critical
for (int i = 0; i < D; i++)
if (x[i] < v[i]) v[i] = x[i];
return *this;
}
Vec<D,T> &max(const Vec<D,T> &x)
{
#pragma omp critical
for (int i = 0; i < D; i++)
if (x[i] > v[i]) v[i] = x[i];
return *this;
}
// Swap with another vector. (Also exists as a global function.)
void swap(Vec<D,T> &x)
{
using namespace std;
#pragma omp critical
for (int i = 0; i < D; i++) swap(v[i], x[i]);
}
// Outside of class: + - * / % ^ << >>
// Some partial compatibility with std::vector
typedef T value_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T *iterator;
typedef const T *const_iterator;
typedef T &reference;
typedef const T &const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
size_t size() const
{ return D; }
T *begin()
{ return &(v[0]); }
const T *begin() const
{ return &(v[0]); }
T *end()
{ return begin() + D; }
const T *end() const
{ return begin() + D; }
// clear() and empty() - set to zero or check for all zero
void clear()
{ for (int i = 0; i < D; i++) v[i] = T(0); }
bool empty() const
{
for (int i = 0; i < D; i++)
if (v[i]) return false;
return true;
}
// Some partial compatibility with std::valarray, plus generalizations
T sum() const
{
T total = v[0];
for (int i = 1; i < D; i++)
total += v[i];
return total;
}
// Sum of absolute values (L1 norm)
T sumabs() const
{
T total = fabs(v[0]);
for (int i = 1; i < D; i++)
total += fabs(v[i]);
return total;
}
T avg() const
{ return sum() / D; }
T product() const
{
T total = v[0];
for (int i = 1; i < D; i++)
total *= v[i];
return total;
}
// Smallest component
T min() const
{
T m = v[0];
for (int i = 1; i < D; i++)
if (v[i] < m) m = v[i];
return m;
}
// Largest component
T max() const
{
T m = v[0];
for (int i = 1; i < D; i++)
if (v[i] > m) m = v[i];
return m;
}
// Apply func to every component, returning a new Vec
Vec<D,T> apply(T func(T)) const
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++) result[i] = func(v[i]);
return result;
}
Vec<D,T> apply(T func(const T&)) const
{
Vec<D,T> result(VEC_UNINITIALIZED);
for (int i = 0; i < D; i++) result[i] = func(v[i]);
return result;
}
// Circular shift: result[i] = v[(i+n) mod D] (n may be negative)
Vec<D,T> cshift(int n) const
{
Vec<D,T> result(VEC_UNINITIALIZED);
if (n < 0)
n = (n % D) + D;
for (int i = 0; i < D; i++)
result[i] = v[(i+n)%D];
return result;
}
// Non-circular shift: vacated components are zero-filled
Vec<D,T> shift(int n) const
{
if (abs(n) >= D)
return Vec<D,T>();
Vec<D,T> result; // Must be initialized to zero
int start = n < T(0) ? -n : 0;
int stop = n > T(0) ? D - n : D;
for (int i = start; i < stop; i++) result[i] = v[i+n];
return result;
}
};
// Shorthands for particular flavors of Vecs
typedef Vec<3,float> vec;
typedef Vec<3,float> point;
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,int> ivec2;
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;
typedef Vec<3, unsigned short int> uvec3;
// Nonmember componentwise arithmetic on two Vecs of the same dimension.
template <int D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] + v2[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] - v2[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] * v2[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] / v2[k];
	return out;
}
// Dot product (spelled v1 ^ v2; see the DOT macro below)
template <int D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	T acc = v1[0] * v2[0];
	for (int k = 1; k < D; k++)
		acc += v1[k] * v2[k];
	return acc;
}
#define DOT ^
// Cross product - only in 3 dimensions (spelled v1 % v2; see CROSS below)
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	return Vec<3,T>(v1[1]*v2[2] - v1[2]*v2[1],
	                v1[2]*v2[0] - v1[0]*v2[2],
	                v1[0]*v2[1] - v1[1]*v2[0]);
}
#define CROSS %
// Component-wise equality and inequality (#include the usual caveats
// about comparing floats for equality...)
template <int D, class T>
static inline bool operator == (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	for (int k = 0; k < D; k++) {
		if (v1[k] != v2[k])
			return false;
	}
	return true;
}
// Inequality is defined as the negation of equality
template <int D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return !(v1 == v2);
}
// Unary operators
template <int D, class T>
static inline const Vec<D,T> &operator + (const Vec<D,T> &v)
{
	// unary plus is the identity
	return v;
}
template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v)
{
	Vec<D,T> neg(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		neg[k] = -v[k];
	return neg;
}
// !v is true iff every component is zero
template <int D, class T>
static inline bool operator ! (const Vec<D,T> &v)
{
	return v.empty();
}
// Vec/scalar operators (scalar must be exactly T; see header comment)
template <int D, class T>
static inline const Vec<D,T> operator * (const T &x, const Vec<D,T> &v)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = x * v[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v, const T &x)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v[k] * x;
	return out;
}
// scalar / Vec divides the scalar by each component
template <int D, class T>
static inline const Vec<D,T> operator / (const T &x, const Vec<D,T> &v)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = x / v[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v, const T &x)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v[k] / x;
	return out;
}
// iostream operators
template <int D, class T>
static inline std::ostream &operator << (std::ostream &os, const Vec<D,T> &v)
{
os << "(";
for (int i = 0; i < D-1; i++)
os << v[i] << ", ";
return os << v[D-1] << ")";
}
template <int D, class T>
static inline std::istream &operator >> (std::istream &is, Vec<D,T> &v)
{
char c1 = 0, c2 = 0;
is >> c1;
if (c1 == '(' || c1 == '[') {
is >> v[0] >> std::ws >> c2;
for (int i = 1; i < D; i++) {
if (c2 == ',')
is >> v[i] >> std::ws >> c2;
else
is.setstate(std::ios::failbit);
}
}
if (c1 == '(' && c2 != ')')
is.setstate(std::ios::failbit);
else if (c1 == '[' && c2 != ']')
is.setstate(std::ios::failbit);
return is;
}
// Swap two Vecs. Not atomic, unlike class method.
namespace std {
template <int D, class T>
static inline void swap(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
for (int i = 0; i < D; i++)
swap(v1[i], v2[i]);
}
}
// Squared length
template <int D, class T>
static inline const T len2(const Vec<D,T> &v)
{
T l2 = v[0] * v[0];
for (int i = 1; i < D; i++)
l2 += v[i] * v[i];
return l2;
}
// Length
template <int D, class T>
static inline const T len(const Vec<D,T> &v)
{
return std::sqrt(len2(v));
}
// Utility functions for square and cube, to go along with sqrt and cbrt
template <class T>
static inline T sqr(const T &x)
{
return x*x;
}
template <class T>
static inline T cube(const T &x)
{
return x*x*x;
}
// Squared distance
template <int D, class T>
static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
T d2 = sqr(v2[0]-v1[0]);
for (int i = 1; i < D; i++)
d2 += sqr(v2[i]-v1[i]);
return d2;
}
// Distance
template <int D, class T>
static inline const T dist(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
return std::sqrt(dist2(v1,v2));
}
// In-place normalization to unit length
template <int D, class T>
static inline Vec<D,T> normalize(Vec<D,T> &v)
{
T l = len(v);
if (unlikely(l <= T(0))) {
v[0] = T(1);
for (int i = 1; i < D; i++)
v[i] = T(0);
return v;
}
l = T(1) / l;
for (int i = 0; i < D; i++)
v[i] *= l;
return v;
}
// Area-weighted triangle face normal
template <class T>
static inline T trinorm(const T &v0, const T &v1, const T &v2)
{
return (typename T::value_type) 0.5 * ((v1 - v0) CROSS (v2 - v0));
}
// Sign of a scalar. Note that sgn(0) == 1.
template <class T>
static inline T sgn(const T &x)
{
return (x < T(0)) ? T(-1) : T(1);
}
// Utility functions based on GLSL
template <class T>
static inline T fract(const T &x)
{
return x - floor(x);
}
template <class T>
static inline T clamp(const T &x, const T &a, const T &b)
{
return x > a ? x < b ? x : b : a; // returns a on NaN
}
template <class T, class S>
static inline T mix(const T &x, const T &y, const S &a)
{
return (S(1)-a) * x + a * y;
}
template <class T>
static inline T step(const T &x, const T &a)
{
return x < a ? T(0) : T(1);
}
template <class T>
static inline T smoothstep(const T &a, const T &b, const T &x)
{
if (b <= a) return step(x,a);
T t = (x - a) / (b - a);
return t <= T(0) ? T(0) : t >= T(1) ? T(1) : t * t * (T(3) - T(2) * t);
}
// GLSL faceforward(): returns N if Nref points against I, otherwise -N,
// so the result always faces the incident vector.  The result is a vector;
// the original declared the return type as scalar T, which cannot hold
// the Vec<D,T> expressions returned below.
template <int D, class T>
static inline const Vec<D,T> faceforward(const Vec<D,T> &N, const Vec<D,T> &I,
                                         const Vec<D,T> &Nref)
{
	return ((Nref DOT I) < T(0)) ? N : -N;
}
// GLSL reflect(): mirror incident vector I about (unit-length) normal N.
// The expression yields a Vec<D,T>; the original's scalar return type T
// was a defect.
template <int D, class T>
static inline const Vec<D,T> reflect(const Vec<D,T> &I, const Vec<D,T> &N)
{
	return I - (T(2) * (N DOT I)) * N;
}
// GLSL refract(): refract incident vector I at (unit-length) normal N with
// relative index of refraction eta.  On total internal reflection (k < 0)
// the GLSL spec mandates a zero vector.  Two fixes vs. the original:
// the return type is Vec<D,T> (not scalar T, which can't hold either
// branch), and the refraction formula per the GLSL spec is
// eta*I - (eta*NdotI + sqrt(k))*N — the original multiplied where the
// spec adds.
template <int D, class T>
static inline const Vec<D,T> refract(const Vec<D,T> &I, const Vec<D,T> &N,
                                     const T &eta)
{
	T NdotI = N DOT I;
	T k = T(1) - sqr(eta) * (T(1) - sqr(NdotI));
	if (k < T(0))
		return T(0) * I;  // zero vector, built without needing a Vec default ctor
	return eta * I - (eta * NdotI + std::sqrt(k)) * N;
}
// C99 compatibility functions for MSVS
#ifdef _WIN32
#ifdef cbrt
# undef cbrt
#endif
inline float cbrt(float x)
{
return (x < 0.0f) ? -std::pow(-x, 1.0f / 3.0f) : std::pow(x, 1.0f / 3.0f);
}
inline double cbrt(double x)
{
return (x < 0.0) ? -std::pow(-x, 1.0 / 3.0) : std::pow(x, 1.0 / 3.0);
}
inline long double cbrt(long double x)
{
return (x < 0.0L) ? -std::pow(-x, 1.0L / 3.0L) : std::pow(x, 1.0L / 3.0L);
}
#ifdef round
# undef round
#endif
inline float round(float x)
{
return (x < 0.0f) ? float(int(x - 0.5f)) : float(int(x + 0.5f));
}
// Round-half-away-from-zero for double.  Compare against a double literal
// (the original used 0.0f).  NOTE(review): the int round-trip limits the
// usable range to int — matches the float overload's existing behavior.
inline double round(double x)
{
	return (x < 0.0) ? double(int(x - 0.5)) : double(int(x + 0.5));
}
// Round-half-away-from-zero for long double.  Compare against a long double
// literal (the original used 0.0f).  NOTE(review): the int round-trip limits
// the usable range to int — matches the other overloads.
inline long double round(long double x)
{
	return (x < 0.0L) ? (long double)(int(x - 0.5L)) : (long double)(int(x + 0.5L));
}
#ifdef trunc
# undef trunc
#endif
// Truncate toward zero.  The original's conditional had two identical
// branches; conversion to int already truncates toward zero for both signs.
inline float trunc(float x)
{
	return float(int(x));
}
// Truncate toward zero (double).  Dead conditional removed — both branches
// of the original were identical.
inline double trunc(double x)
{
	return double(int(x));
}
// Truncate toward zero (long double).  Dead conditional removed — both
// branches of the original were identical.
inline long double trunc(long double x)
{
	return (long double)(int(x));
}
#endif
// Generic macros for declaring 1-, 2-, and 3- argument
// componentwise functions on vecs
#define VEC_DECLARE_ONEARG(name) \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i]); \
return result; \
}
#define VEC_DECLARE_TWOARG(name) \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const T &w) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w); \
return result; \
} \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w[i]); \
return result; \
}
#define VEC_DECLARE_THREEARG(name) \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const T &w, const T &x) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w, x); \
return result; \
} \
template <int D, class T> \
static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w, const Vec<D,T> &x) \
{ \
using namespace std; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (int i = 0; i < D; i++) \
result[i] = name(v[i], w[i], x[i]); \
return result; \
}
VEC_DECLARE_ONEARG(fabs)
VEC_DECLARE_ONEARG(floor)
VEC_DECLARE_ONEARG(ceil)
VEC_DECLARE_ONEARG(round)
VEC_DECLARE_ONEARG(trunc)
VEC_DECLARE_ONEARG(sin)
VEC_DECLARE_ONEARG(asin)
VEC_DECLARE_ONEARG(sinh)
VEC_DECLARE_ONEARG(cos)
VEC_DECLARE_ONEARG(acos)
VEC_DECLARE_ONEARG(cosh)
VEC_DECLARE_ONEARG(tan)
VEC_DECLARE_ONEARG(atan)
VEC_DECLARE_ONEARG(tanh)
VEC_DECLARE_ONEARG(exp)
VEC_DECLARE_ONEARG(log)
VEC_DECLARE_ONEARG(sqrt)
VEC_DECLARE_ONEARG(sqr)
VEC_DECLARE_ONEARG(cbrt)
VEC_DECLARE_ONEARG(cube)
VEC_DECLARE_ONEARG(sgn)
VEC_DECLARE_TWOARG(atan2)
VEC_DECLARE_TWOARG(pow)
VEC_DECLARE_TWOARG(fmod)
VEC_DECLARE_TWOARG(step)
namespace std {
VEC_DECLARE_TWOARG(min)
VEC_DECLARE_TWOARG(max)
}
VEC_DECLARE_THREEARG(smoothstep)
VEC_DECLARE_THREEARG(clamp)
#undef VEC_DECLARE_ONEARG
#undef VEC_DECLARE_TWOARG
#undef VEC_DECLARE_THREEARG
// Inject into std namespace
namespace std {
using ::fabs;
using ::floor;
using ::ceil;
using ::round;
using ::trunc;
using ::sin;
using ::asin;
using ::sinh;
using ::cos;
using ::acos;
using ::cosh;
using ::tan;
using ::atan;
using ::tanh;
using ::exp;
using ::log;
using ::sqrt;
using ::cbrt;
using ::atan2;
using ::pow;
using ::fmod;
}
// Both valarrays and GLSL use abs() on a vector to mean fabs().
// Let's do the same...
template <int D, class T>
static inline Vec<D,T> abs(const Vec<D,T> &v)
{
return fabs(v);
}
#endif
|
ratecontrol.c | /***************************************************-*- coding: iso-8859-1 -*-
* ratecontrol.c: h264 encoder library (Rate Control)
*****************************************************************************
* Copyright (C) 2005-2008 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
* Michael Niedermayer <michaelni@gmx.at>
* Gabriel Bouvigne <gabriel.bouvigne@joost.com>
* Jason Garrett-Glaser <darkshikari@gmail.com>
 * Måns Rullgård <mru@mru.ath.cx>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#define _ISOC99_SOURCE
#undef NDEBUG // always check asserts, the speed effect is far too small to disable them
#include <math.h>
#include <omp.h>
#include <limits.h>
#include <assert.h>
#include "common/common.h"
#include "common/cpu.h"
#include "ratecontrol.h"
typedef struct
{
int pict_type;
int kept_as_ref;
float qscale;
int mv_bits;
int tex_bits;
int misc_bits;
uint64_t expected_bits; /*total expected bits up to the current frame (current one excluded)*/
double expected_vbv;
float new_qscale;
int new_qp;
int i_count;
int p_count;
int s_count;
float blurred_complexity;
char direct_mode;
} ratecontrol_entry_t;
typedef struct
{
double coeff;
double count;
double decay;
} predictor_t;
struct x264_ratecontrol_t
{
/* constants */
int b_abr;
int b_2pass;
int b_vbv;
int b_vbv_min_rate;
double fps;
double bitrate;
double rate_tolerance;
int nmb; /* number of macroblocks in a frame */
int qp_constant[5];
/* current frame */
ratecontrol_entry_t *rce;
int qp; /* qp for current frame */
int qpm; /* qp for current macroblock */
float f_qpm; /* qp for current macroblock: precise float for AQ */
float qpa_rc; /* average of macroblocks' qp before aq */
float qpa_aq; /* average of macroblocks' qp after aq */
int qp_force;
/* VBV stuff */
double buffer_size;
double buffer_fill_final; /* real buffer as of the last finished frame */
double buffer_fill; /* planned buffer, if all in-progress frames hit their bit budget */
double buffer_rate; /* # of bits added to buffer_fill after each frame */
predictor_t *pred; /* predict frame size from satd */
/* ABR stuff */
int last_satd;
double last_rceq;
double cplxr_sum; /* sum of bits*qscale/rceq */
double expected_bits_sum; /* sum of qscale2bits after rceq, ratefactor, and overflow, only includes finished frames */
double wanted_bits_window; /* target bitrate * window */
double cbr_decay;
double short_term_cplxsum;
double short_term_cplxcount;
double rate_factor_constant;
double ip_offset;
double pb_offset;
/* 2pass stuff */
FILE *p_stat_file_out;
char *psz_stat_file_tmpname;
int num_entries; /* number of ratecontrol_entry_ts */
ratecontrol_entry_t *entry; /* FIXME: copy needed data and free this once init is done */
double last_qscale;
double last_qscale_for[5]; /* last qscale for a specific pict type, used for max_diff & ipb factor stuff */
int last_non_b_pict_type;
double accum_p_qp; /* for determining I-frame quant */
double accum_p_norm;
double last_accum_p_norm;
double lmin[5]; /* min qscale by frame type */
double lmax[5];
double lstep; /* max change (multiply) in qscale per frame */
/* MBRC stuff */
double frame_size_estimated;
double frame_size_planned;
predictor_t *row_pred;
predictor_t row_preds[5];
predictor_t *pred_b_from_p; /* predict B-frame size from P-frame satd */
int bframes; /* # consecutive B-frames before this P-frame */
int bframe_bits; /* total cost of those frames */
int i_zones;
x264_zone_t *zones;
x264_zone_t *prev_zone;
};
static int parse_zones( x264_t *h );
static int init_pass2(x264_t *);
static float rate_estimate_qscale( x264_t *h );
static void update_vbv( x264_t *h, int bits );
static void update_vbv_plan( x264_t *h );
static double predict_size( predictor_t *p, double q, double var );
static void update_predictor( predictor_t *p, double q, double var, double bits );
/* Terminology:
* qp = h.264's quantizer
* qscale = linearized quantizer = Lagrange multiplier
*/
static inline double qp2qscale(double qp)
{
return 0.85 * pow(2.0, ( qp - 12.0 ) / 6.0);
}
static inline double qscale2qp(double qscale)
{
return 12.0 + 6.0 * log(qscale/0.85) / log(2.0);
}
/* Texture bitrate is not quite inversely proportional to qscale,
 * probably due to the changing number of SKIP blocks.
* MV bits level off at about qp<=12, because the lambda used
* for motion estimation is constant there. */
/* Estimate how many bits a frame would cost at a given qscale, from the
 * first-pass stats stored in rce (tex_bits, mv_bits, misc_bits measured at
 * rce->qscale).  Texture bits scale slightly super-linearly in 1/qscale
 * (exponent 1.1), MV bits roughly with 1/sqrt(qscale), misc bits are flat.
 * qscale is clamped to >= 0.1 to keep the ratios bounded. */
static inline double qscale2bits(ratecontrol_entry_t *rce, double qscale)
{
    if(qscale<0.1)
        qscale = 0.1;
    return (rce->tex_bits + .1) * pow( rce->qscale / qscale, 1.1 )
           + rce->mv_bits * pow( X264_MAX(rce->qscale, 1) / X264_MAX(qscale, 1), 0.5 )
           + rce->misc_bits;
}
// Find the total AC energy of the block in all planes.
// Find the total AC energy of the block in all planes.
static NOINLINE int ac_energy_mb( x264_t *h, int mb_x, int mb_y, x264_frame_t *frame )
{
    /* This function contains annoying hacks because GCC has a habit of reordering emms
     * and putting it after floating point ops. As a result, we put the emms at the end of the
     * function and make sure that its always called before the float math. Noinline makes
     * sure no reordering goes on. */
    unsigned int var=0, sad, i;
    /* i==0 is luma (16x16 per MB); i==1,2 are chroma planes (8x8 per MB). */
    for( i=0; i<3; i++ )
    {
        int w = i ? 8 : 16;
        int stride = frame->i_stride[i];
        /* Interlaced MB pairs interleave field rows, hence the different
         * offset math when b_interlaced is set. */
        int offset = h->mb.b_interlaced
            ? w * (mb_x + (mb_y&~1) * stride) + (mb_y&1) * stride
            : w * (mb_x + mb_y * stride);
        int pix = i ? PIXEL_8x8 : PIXEL_16x16;
        stride <<= h->mb.b_interlaced;
        var += h->pixf.var[pix]( frame->plane[i]+offset, stride, &sad );
    }
    /* Clamp to >= 1 so the caller can safely take log() of the result. */
    var = X264_MAX(var,1);
    x264_emms();
    return var;
}
/* Precompute a per-macroblock QP offset for the whole frame from each MB's
 * AC energy: high-energy (detailed) blocks get a positive offset, flat
 * blocks a negative one.  Offsets are written to frame->f_qp_offset; when
 * lowres frames exist, an inverse-qscale factor is also cached for the
 * lookahead. */
void x264_adaptive_quant_frame( x264_t *h, x264_frame_t *frame )
{
    int mb_x, mb_y;
    for( mb_y=0; mb_y<h->sps->i_mb_height; mb_y++ )
        for( mb_x=0; mb_x<h->sps->i_mb_width; mb_x++ )
        {
            int energy = ac_energy_mb( h, mb_x, mb_y, frame );
            /* 10 constant chosen to result in approximately the same overall bitrate as without AQ. */
            float qp_adj = h->param.rc.f_aq_strength * 1.5 * (logf(energy) - 10.0);
            frame->f_qp_offset[mb_x + mb_y*h->mb.i_mb_stride] = qp_adj;
            if( h->frames.b_have_lowres )
                frame->i_inv_qscale_factor[mb_x+mb_y*h->mb.i_mb_stride] = FIX8(pow(2.0,-qp_adj/6.0));
        }
}
/*****************************************************************************
 * x264_adaptive_quant:
 * adjust macroblock QP based on variance (AC energy) of the MB.
 * high variance = higher QP
 * low variance = lower QP
 * This generally increases SSIM and lowers PSNR.
 *****************************************************************************/
void x264_adaptive_quant( x264_t *h )
{
    float qp, qp_adj;
    x264_emms();
    qp = h->rc->f_qpm;
    /* Per-MB offset was precomputed by x264_adaptive_quant_frame(). */
    qp_adj = h->fenc->f_qp_offset[h->mb.i_mb_x + h->mb.i_mb_y*h->mb.i_mb_stride];
    h->mb.i_qp = x264_clip3( qp + qp_adj + .5, h->param.rc.i_qp_min, h->param.rc.i_qp_max );
    /* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB,
     * to lower the bit cost of the qp_delta. */
    if( abs(h->mb.i_qp - h->mb.i_last_qp) == 1 )
        h->mb.i_qp = h->mb.i_last_qp;
    h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
}
int x264_ratecontrol_new( x264_t *h )
{
x264_ratecontrol_t *rc;
int i;
x264_emms();
rc = h->rc = x264_malloc( h->param.i_threads * sizeof(x264_ratecontrol_t) );
memset( rc, 0, h->param.i_threads * sizeof(x264_ratecontrol_t) );
rc->b_abr = h->param.rc.i_rc_method != X264_RC_CQP && !h->param.rc.b_stat_read;
rc->b_2pass = h->param.rc.i_rc_method == X264_RC_ABR && h->param.rc.b_stat_read;
/* FIXME: use integers */
if(h->param.i_fps_num > 0 && h->param.i_fps_den > 0)
rc->fps = (float) h->param.i_fps_num / h->param.i_fps_den;
else
rc->fps = 25.0;
rc->bitrate = h->param.rc.i_bitrate * 1000.;
rc->rate_tolerance = h->param.rc.f_rate_tolerance;
rc->nmb = h->mb.i_mb_count;
rc->last_non_b_pict_type = -1;
rc->cbr_decay = 1.0;
if( h->param.rc.i_rc_method == X264_RC_CRF && h->param.rc.b_stat_read )
{
x264_log(h, X264_LOG_ERROR, "constant rate-factor is incompatible with 2pass.\n");
return -1;
}
if( h->param.rc.i_vbv_buffer_size )
{
if( h->param.rc.i_rc_method == X264_RC_CQP )
{
x264_log(h, X264_LOG_WARNING, "VBV is incompatible with constant QP, ignored.\n");
h->param.rc.i_vbv_max_bitrate = 0;
h->param.rc.i_vbv_buffer_size = 0;
}
else if( h->param.rc.i_vbv_max_bitrate == 0 )
{
x264_log( h, X264_LOG_DEBUG, "VBV maxrate unspecified, assuming CBR\n" );
h->param.rc.i_vbv_max_bitrate = h->param.rc.i_bitrate;
}
}
if( h->param.rc.i_vbv_max_bitrate < h->param.rc.i_bitrate &&
h->param.rc.i_vbv_max_bitrate > 0)
x264_log(h, X264_LOG_WARNING, "max bitrate less than average bitrate, ignored.\n");
else if( h->param.rc.i_vbv_max_bitrate > 0 &&
h->param.rc.i_vbv_buffer_size > 0 )
{
if( h->param.rc.i_vbv_buffer_size < 3 * h->param.rc.i_vbv_max_bitrate / rc->fps )
{
h->param.rc.i_vbv_buffer_size = 3 * h->param.rc.i_vbv_max_bitrate / rc->fps;
x264_log( h, X264_LOG_WARNING, "VBV buffer size too small, using %d kbit\n",
h->param.rc.i_vbv_buffer_size );
}
if( h->param.rc.f_vbv_buffer_init > 1. )
h->param.rc.f_vbv_buffer_init = x264_clip3f( h->param.rc.f_vbv_buffer_init / h->param.rc.i_vbv_buffer_size, 0, 1 );
rc->buffer_rate = h->param.rc.i_vbv_max_bitrate * 1000. / rc->fps;
rc->buffer_size = h->param.rc.i_vbv_buffer_size * 1000.;
rc->buffer_fill_final = rc->buffer_size * h->param.rc.f_vbv_buffer_init;
rc->cbr_decay = 1.0 - rc->buffer_rate / rc->buffer_size
* 0.5 * X264_MAX(0, 1.5 - rc->buffer_rate * rc->fps / rc->bitrate);
rc->b_vbv = 1;
rc->b_vbv_min_rate = !rc->b_2pass
&& h->param.rc.i_rc_method == X264_RC_ABR
&& h->param.rc.i_vbv_max_bitrate <= h->param.rc.i_bitrate;
}
else if( h->param.rc.i_vbv_max_bitrate )
{
x264_log(h, X264_LOG_WARNING, "VBV maxrate specified, but no bufsize.\n");
h->param.rc.i_vbv_max_bitrate = 0;
}
if(rc->rate_tolerance < 0.01)
{
x264_log(h, X264_LOG_WARNING, "bitrate tolerance too small, using .01\n");
rc->rate_tolerance = 0.01;
}
h->mb.b_variable_qp = rc->b_vbv || h->param.rc.i_aq_mode;
if( rc->b_abr )
{
/* FIXME ABR_INIT_QP is actually used only in CRF */
#define ABR_INIT_QP ( h->param.rc.i_rc_method == X264_RC_CRF ? h->param.rc.f_rf_constant : 24 )
rc->accum_p_norm = .01;
rc->accum_p_qp = ABR_INIT_QP * rc->accum_p_norm;
/* estimated ratio that produces a reasonable QP for the first I-frame */
rc->cplxr_sum = .01 * pow( 7.0e5, h->param.rc.f_qcompress ) * pow( h->mb.i_mb_count, 0.5 );
rc->wanted_bits_window = 1.0 * rc->bitrate / rc->fps;
rc->last_non_b_pict_type = SLICE_TYPE_I;
}
if( h->param.rc.i_rc_method == X264_RC_CRF )
{
/* arbitrary rescaling to make CRF somewhat similar to QP */
double base_cplx = h->mb.i_mb_count * (h->param.i_bframe ? 120 : 80);
rc->rate_factor_constant = pow( base_cplx, 1 - h->param.rc.f_qcompress )
/ qp2qscale( h->param.rc.f_rf_constant );
}
rc->ip_offset = 6.0 * log(h->param.rc.f_ip_factor) / log(2.0);
rc->pb_offset = 6.0 * log(h->param.rc.f_pb_factor) / log(2.0);
rc->qp_constant[SLICE_TYPE_P] = h->param.rc.i_qp_constant;
rc->qp_constant[SLICE_TYPE_I] = x264_clip3( h->param.rc.i_qp_constant - rc->ip_offset + 0.5, 0, 51 );
rc->qp_constant[SLICE_TYPE_B] = x264_clip3( h->param.rc.i_qp_constant + rc->pb_offset + 0.5, 0, 51 );
rc->lstep = pow( 2, h->param.rc.i_qp_step / 6.0 );
rc->last_qscale = qp2qscale(26);
rc->pred = x264_malloc( 5*sizeof(predictor_t) );
rc->pred_b_from_p = x264_malloc( sizeof(predictor_t) );
for( i = 0; i < 5; i++ )
{
rc->last_qscale_for[i] = qp2qscale( ABR_INIT_QP );
rc->lmin[i] = qp2qscale( h->param.rc.i_qp_min );
rc->lmax[i] = qp2qscale( h->param.rc.i_qp_max );
rc->pred[i].coeff= 2.0;
rc->pred[i].count= 1.0;
rc->pred[i].decay= 0.5;
rc->row_preds[i].coeff= .25;
rc->row_preds[i].count= 1.0;
rc->row_preds[i].decay= 0.5;
}
*rc->pred_b_from_p = rc->pred[0];
if( parse_zones( h ) < 0 )
{
x264_log( h, X264_LOG_ERROR, "failed to parse zones\n" );
return -1;
}
/* Load stat file and init 2pass algo */
if( h->param.rc.b_stat_read )
{
char *p, *stats_in, *stats_buf;
/* read 1st pass stats */
assert( h->param.rc.psz_stat_in );
stats_buf = stats_in = x264_slurp_file( h->param.rc.psz_stat_in );
if( !stats_buf )
{
x264_log(h, X264_LOG_ERROR, "ratecontrol_init: can't open stats file\n");
return -1;
}
/* check whether 1st pass options were compatible with current options */
if( !strncmp( stats_buf, "#options:", 9 ) )
{
int i;
char *opts = stats_buf;
stats_in = strchr( stats_buf, '\n' );
if( !stats_in )
return -1;
*stats_in = '\0';
stats_in++;
if( ( p = strstr( opts, "bframes=" ) ) && sscanf( p, "bframes=%d", &i )
&& h->param.i_bframe != i )
{
x264_log( h, X264_LOG_ERROR, "different number of B-frames than 1st pass (%d vs %d)\n",
h->param.i_bframe, i );
return -1;
}
/* since B-adapt doesn't (yet) take into account B-pyramid,
* the converse is not a problem */
if( strstr( opts, "b_pyramid=1" ) && !h->param.b_bframe_pyramid )
x264_log( h, X264_LOG_WARNING, "1st pass used B-pyramid, 2nd doesn't\n" );
if( ( p = strstr( opts, "keyint=" ) ) && sscanf( p, "keyint=%d", &i )
&& h->param.i_keyint_max != i )
x264_log( h, X264_LOG_WARNING, "different keyint than 1st pass (%d vs %d)\n",
h->param.i_keyint_max, i );
if( strstr( opts, "qp=0" ) && h->param.rc.i_rc_method == X264_RC_ABR )
x264_log( h, X264_LOG_WARNING, "1st pass was lossless, bitrate prediction will be inaccurate\n" );
if( ( p = strstr( opts, "b_adapt=" ) ) && sscanf( p, "b_adapt=%d", &i ) && i >= X264_B_ADAPT_NONE && i <= X264_B_ADAPT_TRELLIS )
h->param.i_bframe_adaptive = i;
else if( h->param.i_bframe )
{
x264_log( h, X264_LOG_ERROR, "b_adapt method specified in stats file not valid\n" );
return -1;
}
if( ( p = strstr( opts, "scenecut=" ) ) && sscanf( p, "scenecut=%d", &i ) && i >= -1 && i <= 100 )
{
h->param.i_scenecut_threshold = i;
h->param.b_pre_scenecut = !!strstr( p, "(pre)" );
}
else
{
x264_log( h, X264_LOG_ERROR, "scenecut method specified in stats file not valid\n" );
return -1;
}
}
/* find number of pics */
p = stats_in;
for(i=-1; p; i++)
p = strchr(p+1, ';');
if(i==0)
{
x264_log(h, X264_LOG_ERROR, "empty stats file\n");
return -1;
}
rc->num_entries = i;
if( h->param.i_frame_total < rc->num_entries && h->param.i_frame_total > 0 )
{
x264_log( h, X264_LOG_WARNING, "2nd pass has fewer frames than 1st pass (%d vs %d)\n",
h->param.i_frame_total, rc->num_entries );
}
if( h->param.i_frame_total > rc->num_entries )
{
x264_log( h, X264_LOG_ERROR, "2nd pass has more frames than 1st pass (%d vs %d)\n",
h->param.i_frame_total, rc->num_entries );
return -1;
}
rc->entry = (ratecontrol_entry_t*) x264_malloc(rc->num_entries * sizeof(ratecontrol_entry_t));
memset(rc->entry, 0, rc->num_entries * sizeof(ratecontrol_entry_t));
/* init all to skipped p frames */
for(i=0; i<rc->num_entries; i++)
{
ratecontrol_entry_t *rce = &rc->entry[i];
rce->pict_type = SLICE_TYPE_P;
rce->qscale = rce->new_qscale = qp2qscale(20);
rce->misc_bits = rc->nmb + 10;
rce->new_qp = 0;
}
/* read stats */
p = stats_in;
for(i=0; i < rc->num_entries; i++)
{
ratecontrol_entry_t *rce;
int frame_number;
char pict_type;
int e;
char *next;
float qp;
next= strchr(p, ';');
if(next)
{
(*next)=0; //sscanf is unbelievably slow on long strings
next++;
}
e = sscanf(p, " in:%d ", &frame_number);
if(frame_number < 0 || frame_number >= rc->num_entries)
{
x264_log(h, X264_LOG_ERROR, "bad frame number (%d) at stats line %d\n", frame_number, i);
return -1;
}
rce = &rc->entry[frame_number];
rce->direct_mode = 0;
e += sscanf(p, " in:%*d out:%*d type:%c q:%f tex:%d mv:%d misc:%d imb:%d pmb:%d smb:%d d:%c",
&pict_type, &qp, &rce->tex_bits,
&rce->mv_bits, &rce->misc_bits, &rce->i_count, &rce->p_count,
&rce->s_count, &rce->direct_mode);
switch(pict_type)
{
case 'I': rce->kept_as_ref = 1;
case 'i': rce->pict_type = SLICE_TYPE_I; break;
case 'P': rce->pict_type = SLICE_TYPE_P; break;
case 'B': rce->kept_as_ref = 1;
case 'b': rce->pict_type = SLICE_TYPE_B; break;
default: e = -1; break;
}
if(e < 10)
{
x264_log(h, X264_LOG_ERROR, "statistics are damaged at line %d, parser out=%d\n", i, e);
return -1;
}
rce->qscale = qp2qscale(qp);
p = next;
}
x264_free(stats_buf);
if(h->param.rc.i_rc_method == X264_RC_ABR)
{
if(init_pass2(h) < 0) return -1;
} /* else we're using constant quant, so no need to run the bitrate allocation */
}
/* Open output file */
/* If input and output files are the same, output to a temp file
* and move it to the real name only when it's complete */
if( h->param.rc.b_stat_write )
{
char *p;
rc->psz_stat_file_tmpname = x264_malloc( strlen(h->param.rc.psz_stat_out) + 6 );
strcpy( rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out );
strcat( rc->psz_stat_file_tmpname, ".temp" );
rc->p_stat_file_out = fopen( rc->psz_stat_file_tmpname, "wb" );
if( rc->p_stat_file_out == NULL )
{
x264_log(h, X264_LOG_ERROR, "ratecontrol_init: can't open stats file\n");
return -1;
}
p = x264_param2string( &h->param, 1 );
fprintf( rc->p_stat_file_out, "#options: %s\n", p );
x264_free( p );
}
for( i=0; i<h->param.i_threads; i++ )
{
h->thread[i]->rc = rc+i;
if( i )
{
rc[i] = rc[0];
memcpy( &h->thread[i]->param, &h->param, sizeof( x264_param_t ) );
h->thread[i]->mb.b_variable_qp = h->mb.b_variable_qp;
}
}
return 0;
}
/* Parse a single zone spec of the form "start,end[,q=QP | ,b=FACTOR][,opt=val...]".
 * On success fills *z; trailing per-zone option overrides are applied to a
 * freshly allocated copy of the global params in z->param.
 * Returns 0 on success, -1 on parse or allocation failure. */
static int parse_zone( x264_t *h, x264_zone_t *z, char *p )
{
    int len = 0;
    char *tok, *saveptr;
    z->param = NULL;
    z->f_bitrate_factor = 1;
    if( 3 <= sscanf(p, "%u,%u,q=%u%n", &z->i_start, &z->i_end, &z->i_qp, &len) )
        z->b_force_qp = 1;
    else if( 3 <= sscanf(p, "%u,%u,b=%f%n", &z->i_start, &z->i_end, &z->f_bitrate_factor, &len) )
        z->b_force_qp = 0;
    else if( 2 <= sscanf(p, "%u,%u%n", &z->i_start, &z->i_end, &len) )
        z->b_force_qp = 0;
    else
    {
        x264_log( h, X264_LOG_ERROR, "invalid zone: \"%s\"\n", p );
        return -1;
    }
    p += len;
    if( !*p )
        return 0;
    z->param = malloc( sizeof(x264_param_t) );
    if( !z->param )
        return -1;  /* was unchecked: a NULL here crashed the memcpy below */
    memcpy( z->param, &h->param, sizeof(x264_param_t) );
    while( (tok = strtok_r( p, ",", &saveptr )) )
    {
        char *val = strchr( tok, '=' );
        if( val )
        {
            *val = '\0';
            val++;
        }
        if( x264_param_parse( z->param, tok, val ) )
        {
            x264_log( h, X264_LOG_ERROR, "invalid zone param: %s = %s\n", tok, val );
            return -1;
        }
        p = NULL;
    }
    return 0;
}
/* Expand h->param.rc.psz_zones / h->param.rc.zones into rc->zones, with an
 * implicit catch-all zone at index 0 covering [0, INT_MAX] whose param is a
 * copy of the global params (zones without overrides alias it).
 * Returns 0 on success, -1 on malformed input. */
static int parse_zones( x264_t *h )
{
    x264_ratecontrol_t *rc = h->rc;
    int i;
    if( h->param.rc.psz_zones && !h->param.rc.i_zones )
    {
        char *p, *tok, *saveptr;
        char *psz_zones = x264_malloc( strlen(h->param.rc.psz_zones)+1 );
        strcpy( psz_zones, h->param.rc.psz_zones );
        /* Zone count = number of '/' separators + 1. */
        h->param.rc.i_zones = 1;
        for( p = psz_zones; *p; p++ )
            h->param.rc.i_zones += (*p == '/');
        h->param.rc.zones = x264_malloc( h->param.rc.i_zones * sizeof(x264_zone_t) );
        p = psz_zones;
        for( i = 0; i < h->param.rc.i_zones; i++ )
        {
            tok = strtok_r( p, "/", &saveptr );
            if( !tok || parse_zone( h, &h->param.rc.zones[i], tok ) )
            {
                x264_free( psz_zones );  /* was leaked on this error path */
                return -1;
            }
            p = NULL;
        }
        x264_free( psz_zones );
    }
    if( h->param.rc.i_zones > 0 )
    {
        for( i = 0; i < h->param.rc.i_zones; i++ )
        {
            x264_zone_t z = h->param.rc.zones[i];
            if( z.i_start < 0 || z.i_start > z.i_end )
            {
                x264_log( h, X264_LOG_ERROR, "invalid zone: start=%d end=%d\n",
                          z.i_start, z.i_end );
                return -1;
            }
            else if( !z.b_force_qp && z.f_bitrate_factor <= 0 )
            {
                x264_log( h, X264_LOG_ERROR, "invalid zone: bitrate_factor=%f\n",
                          z.f_bitrate_factor );
                return -1;
            }
        }
        rc->i_zones = h->param.rc.i_zones + 1;
        rc->zones = x264_malloc( rc->i_zones * sizeof(x264_zone_t) );
        memcpy( rc->zones+1, h->param.rc.zones, (rc->i_zones-1) * sizeof(x264_zone_t) );
        // default zone to fall back to if none of the others match
        rc->zones[0].i_start = 0;
        rc->zones[0].i_end = INT_MAX;
        rc->zones[0].b_force_qp = 0;
        rc->zones[0].f_bitrate_factor = 1;
        rc->zones[0].param = x264_malloc( sizeof(x264_param_t) );
        memcpy( rc->zones[0].param, &h->param, sizeof(x264_param_t) );
        for( i = 1; i < rc->i_zones; i++ )
        {
            if( !rc->zones[i].param )
                rc->zones[i].param = rc->zones[0].param;
        }
    }
    return 0;
}
/* Return the zone covering frame_num, or NULL when none matches.
 * Zones are scanned from last to first, so later zones take priority
 * over earlier ones (and over the catch-all at index 0). */
static x264_zone_t *get_zone( x264_t *h, int frame_num )
{
    int idx;
    for( idx = h->rc->i_zones - 1; idx >= 0; idx-- )
    {
        x264_zone_t *zone = &h->rc->zones[idx];
        if( frame_num < zone->i_start || frame_num > zone->i_end )
            continue;
        return zone;
    }
    return NULL;
}
/* After the encode, log the overall ratefactor achieved by ABR.  Only
 * printed for plain ABR with negligible VBV decay (cbr_decay > .9999),
 * where the number is meaningful.  The formula mirrors the CRF
 * rate_factor_constant setup in x264_ratecontrol_new(). */
void x264_ratecontrol_summary( x264_t *h )
{
    x264_ratecontrol_t *rc = h->rc;
    if( rc->b_abr && h->param.rc.i_rc_method == X264_RC_ABR && rc->cbr_decay > .9999 )
    {
        double base_cplx = h->mb.i_mb_count * (h->param.i_bframe ? 120 : 80);
        x264_log( h, X264_LOG_INFO, "final ratefactor: %.2f\n",
                  qscale2qp( pow( base_cplx, 1 - h->param.rc.f_qcompress )
                             * rc->cplxr_sum / rc->wanted_bits_window ) );
    }
}
/* Free all ratecontrol state.  If first-pass stats were being written and
 * the encode reached the expected frame count, publish the temp stats file
 * under its final name via rename(). */
void x264_ratecontrol_delete( x264_t *h )
{
    x264_ratecontrol_t *rc = h->rc;
    int i;
    if( rc->p_stat_file_out )
    {
        fclose( rc->p_stat_file_out );
        /* Only rename (publish) the stats when the encode completed. */
        if( h->i_frame >= rc->num_entries )
            if( rename( rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out ) != 0 )
            {
                x264_log( h, X264_LOG_ERROR, "failed to rename \"%s\" to \"%s\"\n",
                          rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out );
            }
        x264_free( rc->psz_stat_file_tmpname );
    }
    x264_free( rc->pred );
    x264_free( rc->pred_b_from_p );
    x264_free( rc->entry );
    if( rc->zones )
    {
        x264_free( rc->zones[0].param );
        /* Zones without their own overrides alias zones[0].param (see
         * parse_zones); free each distinct param exactly once. */
        if( h->param.rc.psz_zones )
            for( i=1; i<rc->i_zones; i++ )
                if( rc->zones[i].param != rc->zones[0].param )
                    x264_free( rc->zones[i].param );
        x264_free( rc->zones );
    }
    x264_free( rc );
}
/* Store the running estimate of the current frame's size in bits.
 * Guarded by an OpenMP critical section because other encoder threads
 * read it concurrently via x264_ratecontrol_get_estimated_size(). */
void x264_ratecontrol_set_estimated_size( x264_t *h, int bits )
{
#pragma omp critical
    h->rc->frame_size_estimated = bits;
}
/* Read the current frame-size estimate; counterpart of
 * x264_ratecontrol_set_estimated_size(), guarded by the same critical
 * section. */
int x264_ratecontrol_get_estimated_size( x264_t const *h)
{
    int size;
#pragma omp critical
    size = h->rc->frame_size_estimated;
    return size;
}
/* Fold the current frame's QP into an exponentially-decayed running average
 * (decay 0.95) used for choosing I-frame quantizers.  I-frame QPs are
 * shifted by ip_offset first so they are comparable to P-frame QPs. */
static void accum_p_qp_update( x264_t *h, float qp )
{
    x264_ratecontrol_t *rc = h->rc;
    double effective_qp = qp;
    if( h->sh.i_type == SLICE_TYPE_I )
        effective_qp += rc->ip_offset;
    rc->accum_p_norm = rc->accum_p_norm * .95 + 1;
    rc->accum_p_qp = rc->accum_p_qp * .95 + effective_qp;
}
/* Before encoding a frame, choose a QP for it */
void x264_ratecontrol_start( x264_t *h, int i_force_qp )
{
    x264_ratecontrol_t *rc = h->rc;
    ratecontrol_entry_t *rce = NULL;
    x264_zone_t *zone = get_zone( h, h->fenc->i_frame );
    float q;
    x264_emms();
    /* Entering a different zone: apply its parameter overrides. */
    if( zone && (!rc->prev_zone || zone->param != rc->prev_zone->param) )
        x264_encoder_reconfig( h, zone->param );
    rc->prev_zone = zone;
    /* i_force_qp is 1-based; 0 means "not forced" (see the branch below). */
    rc->qp_force = i_force_qp;
    if( h->param.rc.b_stat_read )
    {
        int frame = h->fenc->i_frame;
        assert( frame >= 0 && frame < rc->num_entries );
        rce = h->rc->rce = &h->rc->entry[frame];
        /* Reuse the first pass's spatial/temporal direct-MV decision. */
        if( h->sh.i_type == SLICE_TYPE_B
            && h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_AUTO )
        {
            h->sh.b_direct_spatial_mv_pred = ( rce->direct_mode == 's' );
            h->mb.b_direct_auto_read = ( rce->direct_mode == 's' || rce->direct_mode == 't' );
        }
    }
    if( rc->b_vbv )
    {
        memset( h->fdec->i_row_bits, 0, h->sps->i_mb_height * sizeof(int) );
        rc->row_pred = &rc->row_preds[h->sh.i_type];
        update_vbv_plan( h );
    }
    /* Count the consecutive B-frames queued ahead of this non-B frame. */
    if( h->sh.i_type != SLICE_TYPE_B )
    {
        rc->bframes = 0;
        while( h->frames.current[rc->bframes] && IS_X264_TYPE_B(h->frames.current[rc->bframes]->i_type) )
            rc->bframes++;
    }
    if( i_force_qp )
    {
        q = i_force_qp - 1;
    }
    else if( rc->b_abr )
    {
        q = qscale2qp( rate_estimate_qscale( h ) );
    }
    else if( rc->b_2pass )
    {
        rce->new_qscale = rate_estimate_qscale( h );
        q = qscale2qp( rce->new_qscale );
    }
    else /* CQP */
    {
        /* Referenced B-frames get a QP halfway between B and P. */
        if( h->sh.i_type == SLICE_TYPE_B && h->fdec->b_kept_as_ref )
            q = ( rc->qp_constant[ SLICE_TYPE_B ] + rc->qp_constant[ SLICE_TYPE_P ] ) / 2;
        else
            q = rc->qp_constant[ h->sh.i_type ];
        if( zone )
        {
            if( zone->b_force_qp )
                q += zone->i_qp - rc->qp_constant[SLICE_TYPE_P];
            else
                q -= 6*log(zone->f_bitrate_factor)/log(2);
        }
    }
    /* Reset per-frame QP accumulators; publish the chosen QP (clipped to
     * the valid H.264 range 0..51) to both the rc state and the frame. */
    rc->qpa_rc =
    rc->qpa_aq = 0;
    h->fdec->f_qp_avg_rc =
    h->fdec->f_qp_avg_aq =
    rc->qpm =
    rc->qp = x264_clip3( (int)(q + 0.5), 0, 51 );
    rc->f_qpm = q;
    if( rce )
        rce->new_qp = rc->qp;
    /* accum_p_qp needs to be here so that future frames can benefit from the
     * data before this frame is done. but this only works because threading
     * guarantees to not re-encode any frames. so the non-threaded case does
     * accum_p_qp later. */
    if( h->param.i_threads > 1 )
        accum_p_qp_update( h, rc->qp );
    if( h->sh.i_type != SLICE_TYPE_B )
        rc->last_non_b_pict_type = h->sh.i_type;
}
/* Predict the bit cost of encoding macroblock row 'y' at quantizer 'qp'.
 * Averages two predictors: the running SATD-based linear model, and the
 * scaled cost of the colocated row in the previous frame (when comparable). */
static double predict_row_size( x264_t *h, int y, int qp )
{
    /* average between two predictors:
     * absolute SATD, and scaled bit cost of the colocated row in the previous frame */
    x264_ratecontrol_t *rc = h->rc;
    double pred_s = predict_size( rc->row_pred, qp2qscale(qp), h->fdec->i_row_satd[y] );
    double pred_t = 0;
    /* Only trust the temporal predictor when the previous frame is the same
     * slice type and its row SATD is within 50% of this frame's row SATD. */
    if( h->sh.i_type != SLICE_TYPE_I
        && h->fref0[0]->i_type == h->fdec->i_type
        && h->fref0[0]->i_row_satd[y] > 0
        && (abs(h->fref0[0]->i_row_satd[y] - h->fdec->i_row_satd[y]) < h->fdec->i_row_satd[y]/2))
    {
        /* scale previous row's bits by the SATD ratio and the qscale ratio
         * (note: the leading part is intentionally integer arithmetic) */
        pred_t = h->fref0[0]->i_row_bits[y] * h->fdec->i_row_satd[y] / h->fref0[0]->i_row_satd[y]
                 * qp2qscale(h->fref0[0]->i_row_qp[y]) / qp2qscale(qp);
    }
    if( pred_t == 0 )
        pred_t = pred_s;
    return (pred_s + pred_t) / 2;
}
/* Total bits spent so far on macroblock rows 0..y of the frame being encoded. */
static double row_bits_so_far( x264_t *h, int y )
{
    double total = 0;
    int row;
    for( row = 0; row <= y; row++ )
        total += h->fdec->i_row_bits[row];
    return total;
}
/* Estimate the total frame size: bits already spent on rows 0..y plus the
 * predicted cost of every remaining row if encoded at quantizer 'qp'. */
static double predict_row_size_sum( x264_t *h, int y, int qp )
{
    double total = row_bits_so_far( h, y );
    int row;
    for( row = y + 1; row < h->sps->i_mb_height; row++ )
        total += predict_row_size( h, row, qp );
    return total;
}
/* Per-macroblock rate control hook. Accounts the bits just spent; then, once
 * per completed macroblock row and only when VBV is enabled, re-evaluates the
 * in-frame QP so the frame tracks its planned size without underflowing VBV. */
void x264_ratecontrol_mb( x264_t *h, int bits )
{
    x264_ratecontrol_t *rc = h->rc;
    const int y = h->mb.i_mb_y;
    x264_emms();
    /* accumulate per-frame totals: row bits and QP sums for averaging later */
    h->fdec->i_row_bits[y] += bits;
    rc->qpa_rc += rc->f_qpm;
    rc->qpa_aq += h->mb.i_qp;
    /* row-level adaptation only runs at the last MB of a row, under VBV */
    if( h->mb.i_mb_x != h->sps->i_mb_width - 1 || !rc->b_vbv)
        return;
    h->fdec->i_row_qp[y] = rc->qpm;
    if( h->sh.i_type == SLICE_TYPE_B )
    {
        /* B-frames shouldn't use lower QP than their reference frames.
         * This code is a bit overzealous in limiting B-frame quantizers, but it helps avoid
         * underflows due to the fact that B-frames are not explicitly covered by VBV. */
        if( y < h->sps->i_mb_height-1 )
        {
            int i_estimated;
            int avg_qp = X264_MAX(h->fref0[0]->i_row_qp[y+1], h->fref1[0]->i_row_qp[y+1])
                       + rc->pb_offset * ((h->fenc->i_type == X264_TYPE_BREF) ? 0.5 : 1);
            rc->qpm = X264_MIN(X264_MAX( rc->qp, avg_qp), 51); //avg_qp could go higher than 51 due to pb_offset
            i_estimated = row_bits_so_far(h, y); //FIXME: compute full estimated size
            if (i_estimated > h->rc->frame_size_planned)
                x264_ratecontrol_set_estimated_size(h, i_estimated);
        }
    }
    else
    {
        /* feed this row's (qscale, satd, bits) sample into the row predictor */
        update_predictor( rc->row_pred, qp2qscale(rc->qpm), h->fdec->i_row_satd[y], h->fdec->i_row_bits[y] );
        /* tweak quality based on difference from predicted size */
        if( y < h->sps->i_mb_height-1 && h->stat.i_slice_count[h->sh.i_type] > 0 )
        {
            int prev_row_qp = h->fdec->i_row_qp[y];
            int b0 = predict_row_size_sum( h, y, rc->qpm );
            int b1 = b0;
            int i_qp_max = X264_MIN( prev_row_qp + h->param.rc.i_qp_step, h->param.rc.i_qp_max );
            int i_qp_min = X264_MAX( prev_row_qp - h->param.rc.i_qp_step, h->param.rc.i_qp_min );
            float buffer_left_planned = rc->buffer_fill - rc->frame_size_planned;
            float rc_tol = 1;
            float headroom = 0;
            /* Don't modify the row QPs until a sufficent amount of the bits of the frame have been processed, in case a flat */
            /* area at the top of the frame was measured inaccurately. */
            if(row_bits_so_far(h,y) < 0.05 * rc->frame_size_planned)
                return;
            /* loosen the size tolerance when plenty of buffer remains */
            headroom = buffer_left_planned/rc->buffer_size;
            if(h->sh.i_type != SLICE_TYPE_I)
                headroom /= 2;
            rc_tol += headroom;
            if( !rc->b_vbv_min_rate )
                i_qp_min = X264_MAX( i_qp_min, h->sh.i_qp );
            /* raise QP while the predicted frame overshoots the plan or would
             * eat more than half of the planned remaining buffer */
            while( rc->qpm < i_qp_max
                   && (b1 > rc->frame_size_planned * rc_tol
                   || (rc->buffer_fill - b1 < buffer_left_planned * 0.5)))
            {
                rc->qpm ++;
                b1 = predict_row_size_sum( h, y, rc->qpm );
            }
            /* avoid VBV underflow */
            while( (rc->qpm < h->param.rc.i_qp_max)
                   && (rc->buffer_fill - b1 < rc->buffer_size * 0.005))
            {
                rc->qpm ++;
                b1 = predict_row_size_sum( h, y, rc->qpm );
            }
            /* lower QP again if the frame is coming in well under plan */
            while( rc->qpm > i_qp_min
                   && rc->qpm > h->fdec->i_row_qp[0]
                   && ((b1 < rc->frame_size_planned * 0.8 && rc->qpm <= prev_row_qp)
                   || b1 < (rc->buffer_fill - rc->buffer_size + rc->buffer_rate) * 1.1) )
            {
                rc->qpm --;
                b1 = predict_row_size_sum( h, y, rc->qpm );
            }
            x264_ratecontrol_set_estimated_size(h, b1);
        }
    }
    /* loses the fractional part of the frame-wise qp */
    rc->f_qpm = rc->qpm;
}
/* Return the QP currently selected by the rate controller for macroblock coding. */
int x264_ratecontrol_qp( x264_t *h )
{
    x264_ratecontrol_t *rc = h->rc;
    return rc->qpm;
}
/* In 2pass, force the same frame types as in the 1st pass */
/* Returns the X264_TYPE_* to use for frame_num, or X264_TYPE_AUTO when not
 * reading stats. If the 2nd pass has more frames than the 1st, permanently
 * degrades all thread contexts to constant-QP encoding and logs an error. */
int x264_ratecontrol_slice_type( x264_t *h, int frame_num )
{
    x264_ratecontrol_t *rc = h->rc;
    if( h->param.rc.b_stat_read )
    {
        if( frame_num >= rc->num_entries )
        {
            /* We could try to initialize everything required for ABR and
             * adaptive B-frames, but that would be complicated.
             * So just calculate the average QP used so far. */
            int i;
            h->param.rc.i_qp_constant = (h->stat.i_slice_count[SLICE_TYPE_P] == 0) ? 24
                                      : 1 + h->stat.f_slice_qp[SLICE_TYPE_P] / h->stat.i_slice_count[SLICE_TYPE_P];
            rc->qp_constant[SLICE_TYPE_P] = x264_clip3( h->param.rc.i_qp_constant, 0, 51 );
            /* derive I/B constants from the P constant via the ip/pb factors */
            rc->qp_constant[SLICE_TYPE_I] = x264_clip3( (int)( qscale2qp( qp2qscale( h->param.rc.i_qp_constant ) / fabs( h->param.rc.f_ip_factor )) + 0.5 ), 0, 51 );
            rc->qp_constant[SLICE_TYPE_B] = x264_clip3( (int)( qscale2qp( qp2qscale( h->param.rc.i_qp_constant ) * fabs( h->param.rc.f_pb_factor )) + 0.5 ), 0, 51 );
            x264_log(h, X264_LOG_ERROR, "2nd pass has more frames than 1st pass (%d)\n", rc->num_entries);
            x264_log(h, X264_LOG_ERROR, "continuing anyway, at constant QP=%d\n", h->param.rc.i_qp_constant);
            if( h->param.i_bframe_adaptive )
                x264_log(h, X264_LOG_ERROR, "disabling adaptive B-frames\n");
            /* switch every thread context to CQP so they all stay consistent */
            for( i = 0; i < h->param.i_threads; i++ )
            {
                h->thread[i]->rc->b_abr = 0;
                h->thread[i]->rc->b_2pass = 0;
                h->thread[i]->param.rc.i_rc_method = X264_RC_CQP;
                h->thread[i]->param.rc.b_stat_read = 0;
                h->thread[i]->param.i_bframe_adaptive = 0;
                h->thread[i]->param.b_pre_scenecut = 0;
                h->thread[i]->param.i_scenecut_threshold = -1;
                if( h->thread[i]->param.i_bframe > 1 )
                    h->thread[i]->param.i_bframe = 1;
            }
            return X264_TYPE_AUTO;
        }
        switch( rc->entry[frame_num].pict_type )
        {
            case SLICE_TYPE_I:
                return rc->entry[frame_num].kept_as_ref ? X264_TYPE_IDR : X264_TYPE_I;
            case SLICE_TYPE_B:
                return rc->entry[frame_num].kept_as_ref ? X264_TYPE_BREF : X264_TYPE_B;
            case SLICE_TYPE_P:
            default:
                return X264_TYPE_P;
        }
    }
    else
    {
        return X264_TYPE_AUTO;
    }
}
/* After encoding one frame, save stats and update ratecontrol state */
/* 'bits' is the actual size of the frame just encoded. Updates MB-type stats,
 * average QPs, writes the pass-1 stats line, maintains ABR/2pass accumulators
 * and the B-frame size predictor, then updates VBV fullness. */
void x264_ratecontrol_end( x264_t *h, int bits )
{
    x264_ratecontrol_t *rc = h->rc;
    const int *mbs = h->stat.frame.i_mb_count;
    int i;
    x264_emms();
    /* aggregate macroblock-type counts for this frame */
    h->stat.frame.i_mb_count_skip = mbs[P_SKIP] + mbs[B_SKIP];
    h->stat.frame.i_mb_count_i = mbs[I_16x16] + mbs[I_8x8] + mbs[I_4x4];
    h->stat.frame.i_mb_count_p = mbs[P_L0] + mbs[P_8x8];
    for( i = B_DIRECT; i < B_8x8; i++ )
        h->stat.frame.i_mb_count_p += mbs[i];
    /* convert the per-MB QP sums into frame averages */
    h->fdec->f_qp_avg_rc = rc->qpa_rc /= h->mb.i_mb_count;
    h->fdec->f_qp_avg_aq = rc->qpa_aq /= h->mb.i_mb_count;
    if( h->param.rc.b_stat_write )
    {
        /* uppercase = kept as reference; lowercase = disposable */
        char c_type = h->sh.i_type==SLICE_TYPE_I ? (h->fenc->i_poc==0 ? 'I' : 'i')
                    : h->sh.i_type==SLICE_TYPE_P ? 'P'
                    : h->fenc->b_kept_as_ref ? 'B' : 'b';
        int dir_frame = h->stat.frame.i_direct_score[1] - h->stat.frame.i_direct_score[0];
        int dir_avg = h->stat.i_direct_score[1] - h->stat.i_direct_score[0];
        char c_direct = h->mb.b_direct_auto_write ?
                        ( dir_frame>0 ? 's' : dir_frame<0 ? 't' :
                          dir_avg>0 ? 's' : dir_avg<0 ? 't' : '-' )
                        : '-';
        fprintf( rc->p_stat_file_out,
                 "in:%d out:%d type:%c q:%.2f tex:%d mv:%d misc:%d imb:%d pmb:%d smb:%d d:%c;\n",
                 h->fenc->i_frame, h->i_frame,
                 c_type, rc->qpa_rc,
                 h->stat.frame.i_tex_bits,
                 h->stat.frame.i_mv_bits,
                 h->stat.frame.i_misc_bits,
                 h->stat.frame.i_mb_count_i,
                 h->stat.frame.i_mb_count_p,
                 h->stat.frame.i_mb_count_skip,
                 c_direct);
    }
    if( rc->b_abr )
    {
        /* complexity ratio accumulator: actual bits vs. rate-equation estimate */
        if( h->sh.i_type != SLICE_TYPE_B )
            rc->cplxr_sum += bits * qp2qscale(rc->qpa_rc) / rc->last_rceq;
        else
        {
            /* Depends on the fact that B-frame's QP is an offset from the following P-frame's.
             * Not perfectly accurate with B-refs, but good enough. */
            rc->cplxr_sum += bits * qp2qscale(rc->qpa_rc) / (rc->last_rceq * fabs(h->param.rc.f_pb_factor));
        }
        rc->cplxr_sum *= rc->cbr_decay;
        rc->wanted_bits_window += rc->bitrate / rc->fps;
        rc->wanted_bits_window *= rc->cbr_decay;
        /* threaded encoders do this in x264_ratecontrol_start instead */
        if( h->param.i_threads == 1 )
            accum_p_qp_update( h, rc->qpa_rc );
    }
    if( rc->b_2pass )
    {
        rc->expected_bits_sum += qscale2bits( rc->rce, qp2qscale(rc->rce->new_qp) );
    }
    if( h->mb.b_variable_qp )
    {
        if( h->sh.i_type == SLICE_TYPE_B )
        {
            /* accumulate consecutive-B sizes; flush the average into the
             * B-from-P predictor when the B-run ends */
            rc->bframe_bits += bits;
            if( !h->frames.current[0] || !IS_X264_TYPE_B(h->frames.current[0]->i_type) )
            {
                update_predictor( rc->pred_b_from_p, qp2qscale(rc->qpa_rc),
                                  h->fref1[h->i_ref1-1]->i_satd, rc->bframe_bits / rc->bframes );
                rc->bframe_bits = 0;
            }
        }
    }
    update_vbv( h, bits );
}
/****************************************************************************
* 2 pass functions
***************************************************************************/
/**
* modify the bitrate curve from pass1 for one frame
*/
static double get_qscale(x264_t *h, ratecontrol_entry_t *rce, double rate_factor, int frame_num)
{
    /* Map a frame's blurred complexity through the rate equation and scale
     * by rate_factor; applies any zone override for this frame number.
     * Side effects: records last_rceq / last_qscale in the rc context. */
    x264_ratecontrol_t *rcc = h->rc;
    x264_zone_t *zone = get_zone( h, frame_num );
    double qscale = pow( rce->blurred_complexity, 1 - h->param.rc.f_qcompress );

    /* avoid NaN's in the rc_eq */
    if( !isfinite(qscale) || rce->tex_bits + rce->mv_bits == 0 )
        qscale = rcc->last_qscale;
    else
    {
        rcc->last_rceq = qscale;
        qscale /= rate_factor;
        rcc->last_qscale = qscale;
    }

    if( zone )
        qscale = zone->b_force_qp ? qp2qscale( zone->i_qp )
                                  : qscale / zone->f_bitrate_factor;

    return qscale;
}
/* Constrain frame qscale 'q' relative to neighbouring frames of other types:
 * I-frames track the accumulated P QP via f_ip_factor, B-frames offset from
 * the last non-B frame via f_pb_factor, and same-type frames are limited to
 * one lstep of change. Also maintains the accum_p_qp/accum_p_norm state. */
static double get_diff_limited_q(x264_t *h, ratecontrol_entry_t *rce, double q)
{
    x264_ratecontrol_t *rcc = h->rc;
    const int pict_type = rce->pict_type;
    // force I/B quants as a function of P quants
    const double last_p_q = rcc->last_qscale_for[SLICE_TYPE_P];
    const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type];
    if( pict_type == SLICE_TYPE_I )
    {
        double iq = q;
        double pq = qp2qscale( rcc->accum_p_qp / rcc->accum_p_norm );
        double ip_factor = fabs( h->param.rc.f_ip_factor );
        /* don't apply ip_factor if the following frame is also I */
        if( rcc->accum_p_norm <= 0 )
            q = iq;
        else if( h->param.rc.f_ip_factor < 0 )
            q = iq / ip_factor;
        else if( rcc->accum_p_norm >= 1 )
            q = pq / ip_factor;
        else
            /* blend between the I and P estimates by how much P history exists */
            q = rcc->accum_p_norm * pq / ip_factor + (1 - rcc->accum_p_norm) * iq;
    }
    else if( pict_type == SLICE_TYPE_B )
    {
        if( h->param.rc.f_pb_factor > 0 )
            q = last_non_b_q;
        if( !rce->kept_as_ref )
            q *= fabs( h->param.rc.f_pb_factor );
    }
    else if( pict_type == SLICE_TYPE_P
             && rcc->last_non_b_pict_type == SLICE_TYPE_P
             && rce->tex_bits == 0 )
    {
        /* fully-skipped P frame: just reuse the previous P qscale */
        q = last_p_q;
    }
    /* last qscale / qdiff stuff */
    if(rcc->last_non_b_pict_type==pict_type
       && (pict_type!=SLICE_TYPE_I || rcc->last_accum_p_norm < 1))
    {
        double last_q = rcc->last_qscale_for[pict_type];
        double max_qscale = last_q * rcc->lstep;
        double min_qscale = last_q / rcc->lstep;
        if (q > max_qscale) q = max_qscale;
        else if(q < min_qscale) q = min_qscale;
    }
    rcc->last_qscale_for[pict_type] = q;
    if(pict_type!=SLICE_TYPE_B)
        rcc->last_non_b_pict_type = pict_type;
    if(pict_type==SLICE_TYPE_I)
    {
        /* an I frame resets the P-QP accumulator */
        rcc->last_accum_p_norm = rcc->accum_p_norm;
        rcc->accum_p_norm = 0;
        rcc->accum_p_qp = 0;
    }
    if(pict_type==SLICE_TYPE_P)
    {
        /* weight by how much of the frame was actually intra-free */
        float mask = 1 - pow( (float)rce->i_count / rcc->nmb, 2 );
        rcc->accum_p_qp = mask * (qscale2qp(q) + rcc->accum_p_qp);
        rcc->accum_p_norm = mask * (1 + rcc->accum_p_norm);
    }
    return q;
}
/* Estimate the bit cost of a block with complexity 'var' at quantizer
 * scale 'q', using the predictor's running linear model. */
static double predict_size( predictor_t *p, double q, double var )
{
    double scale = q * p->count;
    return ( p->coeff * var ) / scale;
}
/* Fold one (quantizer, complexity, bits) observation into the exponentially
 * decayed linear predictor. Samples with negligible complexity are ignored
 * since they carry no useful signal. */
static void update_predictor( predictor_t *p, double q, double var, double bits )
{
    if( var < 10 )
        return;
    p->count = p->count * p->decay + 1;
    p->coeff = p->coeff * p->decay + bits * q / var;
}
// update VBV after encoding a frame
/* Feeds the (qscale, satd, bits) sample into the per-slice-type size
 * predictor, then drains 'bits' from the shared VBV buffer (which lives in
 * thread 0's rc context), warning and clamping on underflow. */
static void update_vbv( x264_t *h, int bits )
{
    x264_ratecontrol_t *rcc = h->rc;
    x264_ratecontrol_t *rct = h->thread[0]->rc;
    /* only update the predictor when SATD was measured (at least 1 per MB) */
    if( rcc->last_satd >= h->mb.i_mb_count )
        update_predictor( &rct->pred[h->sh.i_type], qp2qscale(rcc->qpa_rc), rcc->last_satd, bits );
    if( !rcc->b_vbv )
        return;
    rct->buffer_fill_final += rct->buffer_rate - bits;
    if( rct->buffer_fill_final < 0 )
        x264_log( h, X264_LOG_WARNING, "VBV underflow (%.0f bits)\n", rct->buffer_fill_final );
    rct->buffer_fill_final = x264_clip3f( rct->buffer_fill_final, 0, rct->buffer_size );
}
// provisionally update VBV according to the planned size of all frames currently in progress
/* Starts from the last committed buffer fullness (thread 0's
 * buffer_fill_final) and subtracts the planned/estimated size of every frame
 * still in flight on other threads, clamping to [0, buffer_size]. */
static void update_vbv_plan( x264_t *h )
{
    x264_ratecontrol_t *rcc = h->rc;
    rcc->buffer_fill = h->thread[0]->rc->buffer_fill_final;
    if( h->param.i_threads > 1 )
    {
        /* j = this context's index in the thread array, obtained by pointer
         * difference between per-thread rc structs */
        int j = h->rc - h->thread[0]->rc;
        int i;
        for( i=1; i<h->param.i_threads; i++ )
        {
            /* walk the other threads in encode order, starting after us */
            x264_t *t = h->thread[ (j+i)%h->param.i_threads ];
            double bits = t->rc->frame_size_planned;
            if( !t->b_thread_active )
                continue;
            /* the row-level estimate may exceed the original plan */
            bits = X264_MAX(bits, x264_ratecontrol_get_estimated_size(t));
            rcc->buffer_fill += rcc->buffer_rate - bits;
            rcc->buffer_fill = x264_clip3( rcc->buffer_fill, 0, rcc->buffer_size );
        }
    }
}
// apply VBV constraints and clip qscale to between lmin and lmax
/* Raises 'q' as needed so the predicted frame fits the VBV buffer (with
 * lookahead over the upcoming B-frame run for P frames), then clips to the
 * per-type [lmin, lmax] range — smoothly via a sigmoid in 2pass mode. */
static double clip_qscale( x264_t *h, int pict_type, double q )
{
    x264_ratecontrol_t *rcc = h->rc;
    double lmin = rcc->lmin[pict_type];
    double lmax = rcc->lmax[pict_type];
    double q0 = q;
    /* B-frames are not directly subject to VBV,
     * since they are controlled by the P-frames' QPs.
     * FIXME: in 2pass we could modify previous frames' QP too,
     * instead of waiting for the buffer to fill */
    if( rcc->b_vbv &&
        ( pict_type == SLICE_TYPE_P ||
          ( pict_type == SLICE_TYPE_I && rcc->last_non_b_pict_type == SLICE_TYPE_I ) ) )
    {
        /* soft pressure: raise q as the buffer drops below half full */
        if( rcc->buffer_fill/rcc->buffer_size < 0.5 )
            q /= x264_clip3f( 2.0*rcc->buffer_fill/rcc->buffer_size, 0.5, 1.0 );
    }
    if( rcc->b_vbv && rcc->last_satd > 0 )
    {
        /* Now a hard threshold to make sure the frame fits in VBV.
         * This one is mostly for I-frames. */
        double bits = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd );
        double qf = 1.0;
        if( bits > rcc->buffer_fill/2 )
            qf = x264_clip3f( rcc->buffer_fill/(2*bits), 0.2, 1.0 );
        q /= qf;
        bits *= qf;
        /* conversely, if the frame is much smaller than one buffer refill,
         * allow q to drop so the buffer doesn't overflow */
        if( bits < rcc->buffer_rate/2 )
            q *= bits*2/rcc->buffer_rate;
        q = X264_MAX( q0, q );
        /* Check B-frame complexity, and use up any bits that would
         * overflow before the next P-frame. */
        if( h->sh.i_type == SLICE_TYPE_P )
        {
            int nb = rcc->bframes;
            double pbbits = bits;
            double bbits = predict_size( rcc->pred_b_from_p, q * h->param.rc.f_pb_factor, rcc->last_satd );
            double space;
            if( bbits > rcc->buffer_rate )
                nb = 0;
            pbbits += nb * bbits;
            /* bits that would spill out of the buffer before the next P */
            space = rcc->buffer_fill + (1+nb)*rcc->buffer_rate - rcc->buffer_size;
            if( pbbits < space )
            {
                q *= X264_MAX( pbbits / space,
                               bits / (0.5 * rcc->buffer_size) );
            }
            q = X264_MAX( q0-5, q );
        }
        if( !rcc->b_vbv_min_rate )
            q = X264_MAX( q0, q );
    }
    if(lmin==lmax)
        return lmin;
    else if(rcc->b_2pass)
    {
        /* smooth (sigmoid) clipping in log-qscale space instead of a hard clamp */
        double min2 = log(lmin);
        double max2 = log(lmax);
        q = (log(q) - min2)/(max2-min2) - 0.5;
        q = 1.0/(1.0 + exp(-4*q));
        q = q*(max2-min2) + min2;
        return exp(q);
    }
    else
        return x264_clip3f(q, lmin, lmax);
}
// update qscale for 1 frame based on actual bits used so far
/* Core per-frame quantizer decision. B-frames interpolate the QPs of their
 * two reference frames; other frames go through either the 2pass curve
 * (with VBV/ABR correction) or the 1pass ABR estimator. Also records the
 * planned frame size. Returns a qscale (not a QP). */
static float rate_estimate_qscale( x264_t *h )
{
    float q;
    x264_ratecontrol_t *rcc = h->rc;
    ratecontrol_entry_t rce;   /* copy of the 2pass entry, or scratch in 1pass ABR */
    int pict_type = h->sh.i_type;
    double lmin = rcc->lmin[pict_type];
    double lmax = rcc->lmax[pict_type];
    int64_t total_bits = 8*(h->stat.i_slice_size[SLICE_TYPE_I]
                          + h->stat.i_slice_size[SLICE_TYPE_P]
                          + h->stat.i_slice_size[SLICE_TYPE_B]);
    if( rcc->b_2pass )
    {
        rce = *rcc->rce;
        if(pict_type != rce.pict_type)
        {
            x264_log(h, X264_LOG_ERROR, "slice=%c but 2pass stats say %c\n",
                     slice_type_to_char[pict_type], slice_type_to_char[rce.pict_type]);
        }
    }
    if( pict_type == SLICE_TYPE_B )
    {
        /* B-frames don't have independent ratecontrol, but rather get the
         * average QP of the two adjacent P-frames + an offset */
        int i0 = IS_X264_TYPE_I(h->fref0[0]->i_type);
        int i1 = IS_X264_TYPE_I(h->fref1[0]->i_type);
        int dt0 = abs(h->fenc->i_poc - h->fref0[0]->i_poc);
        int dt1 = abs(h->fenc->i_poc - h->fref1[0]->i_poc);
        float q0 = h->fref0[0]->f_qp_avg_rc;
        float q1 = h->fref1[0]->f_qp_avg_rc;
        /* undo the B-ref offset so both references are on the P scale */
        if( h->fref0[0]->i_type == X264_TYPE_BREF )
            q0 -= rcc->pb_offset/2;
        if( h->fref1[0]->i_type == X264_TYPE_BREF )
            q1 -= rcc->pb_offset/2;
        /* blend reference QPs, weighting by temporal distance */
        if(i0 && i1)
            q = (q0 + q1) / 2 + rcc->ip_offset;
        else if(i0)
            q = q1;
        else if(i1)
            q = q0;
        else
            q = (q0*dt1 + q1*dt0) / (dt0 + dt1);
        if(h->fenc->b_kept_as_ref)
            q += rcc->pb_offset/2;
        else
            q += rcc->pb_offset;
        rcc->frame_size_planned = predict_size( rcc->pred_b_from_p, q, h->fref1[h->i_ref1-1]->i_satd );
        x264_ratecontrol_set_estimated_size(h, rcc->frame_size_planned);
        rcc->last_satd = 0;
        return qp2qscale(q);
    }
    else
    {
        double abr_buffer = 2 * rcc->rate_tolerance * rcc->bitrate;
        if( rcc->b_2pass )
        {
            //FIXME adjust abr_buffer based on distance to the end of the video
            int64_t diff;
            int64_t predicted_bits = total_bits;
            if( rcc->b_vbv )
            {
                /* account for frames still being encoded on other threads */
                if( h->param.i_threads > 1 )
                {
                    int j = h->rc - h->thread[0]->rc;
                    int i;
                    for( i=1; i<h->param.i_threads; i++ )
                    {
                        x264_t *t = h->thread[ (j+i)%h->param.i_threads ];
                        double bits = t->rc->frame_size_planned;
                        if( !t->b_thread_active )
                            continue;
                        bits = X264_MAX(bits, x264_ratecontrol_get_estimated_size(t));
                        predicted_bits += (int64_t)bits;
                    }
                }
            }
            else
            {
                /* no VBV: approximate the in-flight frames at the target rate */
                if( h->fenc->i_frame < h->param.i_threads )
                    predicted_bits += (int64_t)h->fenc->i_frame * rcc->bitrate / rcc->fps;
                else
                    predicted_bits += (int64_t)(h->param.i_threads - 1) * rcc->bitrate / rcc->fps;
            }
            /* correct the curve by how far we've drifted from pass-1 expectations */
            diff = predicted_bits - (int64_t)rce.expected_bits;
            q = rce.new_qscale;
            q /= x264_clip3f((double)(abr_buffer - diff) / abr_buffer, .5, 2);
            if( ((h->fenc->i_frame + 1 - h->param.i_threads) >= rcc->fps) &&
                (rcc->expected_bits_sum > 0))
            {
                /* Adjust quant based on the difference between
                 * achieved and expected bitrate so far */
                double time = (double)h->fenc->i_frame / rcc->num_entries;
                double w = x264_clip3f( time*100, 0.0, 1.0 );
                q *= pow( (double)total_bits / rcc->expected_bits_sum, w );
            }
            if( rcc->b_vbv )
            {
                /* Do not overflow vbv */
                double expected_size = qscale2bits(&rce, q);
                double expected_vbv = rcc->buffer_fill + rcc->buffer_rate - expected_size;
                double expected_fullness = rce.expected_vbv / rcc->buffer_size;
                double qmax = q*(2 - expected_fullness);
                double size_constraint = 1 + expected_fullness;
                qmax = X264_MAX(qmax, rce.new_qscale);
                if (expected_fullness < .05)
                    qmax = lmax;
                qmax = X264_MIN(qmax, lmax);
                /* raise q until the predicted buffer state matches the plan */
                while( ((expected_vbv < rce.expected_vbv/size_constraint) && (q < qmax)) ||
                       ((expected_vbv < 0) && (q < lmax)))
                {
                    q *= 1.05;
                    expected_size = qscale2bits(&rce, q);
                    expected_vbv = rcc->buffer_fill + rcc->buffer_rate - expected_size;
                }
                rcc->last_satd = x264_rc_analyse_slice( h );
            }
            q = x264_clip3f( q, lmin, lmax );
        }
        else /* 1pass ABR */
        {
            /* Calculate the quantizer which would have produced the desired
             * average bitrate if it had been applied to all frames so far.
             * Then modulate that quant based on the current frame's complexity
             * relative to the average complexity so far (using the 2pass RCEQ).
             * Then bias the quant up or down if total size so far was far from
             * the target.
             * Result: Depending on the value of rate_tolerance, there is a
             * tradeoff between quality and bitrate precision. But at large
             * tolerances, the bit distribution approaches that of 2pass. */
            double wanted_bits, overflow=1, lmin, lmax;
            rcc->last_satd = x264_rc_analyse_slice( h );
            /* exponentially decayed running average of complexity */
            rcc->short_term_cplxsum *= 0.5;
            rcc->short_term_cplxcount *= 0.5;
            rcc->short_term_cplxsum += rcc->last_satd;
            rcc->short_term_cplxcount ++;
            /* build a synthetic stats entry so get_qscale() can be reused */
            rce.tex_bits = rcc->last_satd;
            rce.blurred_complexity = rcc->short_term_cplxsum / rcc->short_term_cplxcount;
            rce.mv_bits = 0;
            rce.p_count = rcc->nmb;
            rce.i_count = 0;
            rce.s_count = 0;
            rce.qscale = 1;
            rce.pict_type = pict_type;
            if( h->param.rc.i_rc_method == X264_RC_CRF )
            {
                q = get_qscale( h, &rce, rcc->rate_factor_constant, h->fenc->i_frame );
            }
            else
            {
                int i_frame_done = h->fenc->i_frame + 1 - h->param.i_threads;
                q = get_qscale( h, &rce, rcc->wanted_bits_window / rcc->cplxr_sum, h->fenc->i_frame );
                // FIXME is it simpler to keep track of wanted_bits in ratecontrol_end?
                wanted_bits = i_frame_done * rcc->bitrate / rcc->fps;
                if( wanted_bits > 0 )
                {
                    abr_buffer *= X264_MAX( 1, sqrt(i_frame_done/25) );
                    overflow = x264_clip3f( 1.0 + (total_bits - wanted_bits) / abr_buffer, .5, 2 );
                    q *= overflow;
                }
            }
            if( pict_type == SLICE_TYPE_I && h->param.i_keyint_max > 1
                /* should test _next_ pict type, but that isn't decided yet */
                && rcc->last_non_b_pict_type != SLICE_TYPE_I )
            {
                q = qp2qscale( rcc->accum_p_qp / rcc->accum_p_norm );
                q /= fabs( h->param.rc.f_ip_factor );
            }
            else if( h->i_frame > 0 )
            {
                /* Asymmetric clipping, because symmetric would prevent
                 * overflow control in areas of rapidly oscillating complexity */
                lmin = rcc->last_qscale_for[pict_type] / rcc->lstep;
                lmax = rcc->last_qscale_for[pict_type] * rcc->lstep;
                if( overflow > 1.1 && h->i_frame > 3 )
                    lmax *= rcc->lstep;
                else if( overflow < 0.9 )
                    lmin /= rcc->lstep;
                q = x264_clip3f(q, lmin, lmax);
            }
            else if( h->param.rc.i_rc_method == X264_RC_CRF )
            {
                q = qp2qscale( ABR_INIT_QP ) / fabs( h->param.rc.f_ip_factor );
            }
            //FIXME use get_diff_limited_q() ?
            q = clip_qscale( h, pict_type, q );
        }
        rcc->last_qscale_for[pict_type] =
        rcc->last_qscale = q;
        if( !(rcc->b_2pass && !rcc->b_vbv) && h->fenc->i_frame == 0 )
            rcc->last_qscale_for[SLICE_TYPE_P] = q;
        if( rcc->b_2pass && rcc->b_vbv)
            rcc->frame_size_planned = qscale2bits(&rce, q);
        else
            rcc->frame_size_planned = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd );
        x264_ratecontrol_set_estimated_size(h, rcc->frame_size_planned);
        return q;
    }
}
/* Synchronize rate-control state between sliced/threaded encoder contexts:
 * start-time state flows prev -> cur, end-time accumulators flow cur -> next. */
void x264_thread_sync_ratecontrol( x264_t *cur, x264_t *prev, x264_t *next )
{
    if( cur != prev )
    {
#define COPY(var) memcpy(&cur->rc->var, &prev->rc->var, sizeof(cur->rc->var))
        /* these vars are updated in x264_ratecontrol_start()
         * so copy them from the context that most recently started (prev)
         * to the context that's about to start (cur).
         */
        COPY(accum_p_qp);
        COPY(accum_p_norm);
        COPY(last_satd);
        COPY(last_rceq);
        COPY(last_qscale_for);
        COPY(last_non_b_pict_type);
        COPY(short_term_cplxsum);
        COPY(short_term_cplxcount);
        COPY(bframes);
        COPY(prev_zone);
#undef COPY
    }
    if( cur != next )
    {
#define COPY(var) next->rc->var = cur->rc->var
        /* these vars are updated in x264_ratecontrol_end()
         * so copy them from the context that most recently ended (cur)
         * to the context that's about to end (next)
         */
        COPY(cplxr_sum);
        COPY(expected_bits_sum);
        COPY(wanted_bits_window);
        COPY(bframe_bits);
#undef COPY
    }
    //FIXME row_preds[] (not strictly necessary, but would improve prediction)
    /* the rest of the variables are either constant or thread-local */
}
/* Scan forward from *t0 simulating VBV fullness, writing the fill level per
 * frame into fills[]. Finds an interval [start, end] that begins at a full
 * buffer and ends where it crosses the low-water mark (or vice versa when
 * 'over' flips the direction). Returns nonzero if such an interval exists. */
static int find_underflow( x264_t *h, double *fills, int *t0, int *t1, int over )
{
    /* find an interval ending on an overflow or underflow (depending on whether
     * we're adding or removing bits), and starting on the earliest frame that
     * can influence the buffer fill of that end frame. */
    x264_ratecontrol_t *rcc = h->rc;
    /* NOTE(review): both arms of (over ? .1 : .1) are identical — possibly
     * intended to differ per direction; confirm against upstream history. */
    const double buffer_min = (over ? .1 : .1) * rcc->buffer_size;
    const double buffer_max = .9 * rcc->buffer_size;
    double fill = fills[*t0-1];
    double parity = over ? 1. : -1.;
    int i, start=-1, end=-1;
    for(i = *t0; i < rcc->num_entries; i++)
    {
        /* simulate one frame: refill at buffer_rate, drain by predicted size */
        fill += (rcc->buffer_rate - qscale2bits(&rcc->entry[i], rcc->entry[i].new_qscale)) * parity;
        fill = x264_clip3f(fill, 0, rcc->buffer_size);
        fills[i] = fill;
        if(fill <= buffer_min || i == 0)
        {
            if(end >= 0)
                break;
            start = i;
        }
        else if(fill >= buffer_max && start >= 0)
            end = i;
    }
    *t0 = start;
    *t1 = end;
    return start>=0 && end>=0;
}
/* Multiply new_qscale by 'adjustment' for every frame in [t0, t1] (skipping
 * the interval's first frame unless it is frame 0), clamping each result to
 * [qscale_min, qscale_max]. Returns nonzero if any qscale actually changed. */
static int fix_underflow( x264_t *h, int t0, int t1, double adjustment, double qscale_min, double qscale_max)
{
    x264_ratecontrol_t *rcc = h->rc;
    int changed = 0;
    int i = (t0 > 0) ? t0 + 1 : t0;
    for( ; i <= t1; i++ )
    {
        double old_q = x264_clip3f( rcc->entry[i].new_qscale, qscale_min, qscale_max );
        double new_q = x264_clip3f( old_q * adjustment, qscale_min, qscale_max );
        rcc->entry[i].new_qscale = new_q;
        if( new_q != old_q )
            changed = 1;
    }
    return changed;
}
/* Sum the predicted size of every frame at its current new_qscale, storing
 * the running total (bits consumed before each frame) into expected_bits. */
static double count_expected_bits( x264_t *h )
{
    x264_ratecontrol_t *rcc = h->rc;
    double total = 0;
    int i = 0;
    while( i < rcc->num_entries )
    {
        ratecontrol_entry_t *rce = &rcc->entry[i];
        rce->expected_bits = total;
        total += qscale2bits( rce, rce->new_qscale );
        i++;
    }
    return total;
}
/* 2pass VBV correction: iteratively adjust per-frame qscales so the simulated
 * VBV buffer neither underflows nor wastes overflow headroom, then record the
 * expected buffer occupancy per frame for use while encoding. */
static void vbv_pass2( x264_t *h )
{
    /* for each interval of buffer_full .. underflow, uniformly increase the qp of all
     * frames in the interval until either buffer is full at some intermediate frame or the
     * last frame in the interval no longer underflows. Recompute intervals and repeat.
     * Then do the converse to put bits back into overflow areas until target size is met */
    x264_ratecontrol_t *rcc = h->rc;
    /* fills is offset by one so fills[-1] can hold the initial buffer state.
     * NOTE(review): x264_malloc result is not checked before use here. */
    double *fills = x264_malloc((rcc->num_entries+1)*sizeof(double));
    double all_available_bits = h->param.rc.i_bitrate * 1000. * rcc->num_entries / rcc->fps;
    double expected_bits = 0;
    double adjustment;
    double prev_bits = 0;
    int i, t0, t1;
    double qscale_min = qp2qscale(h->param.rc.i_qp_min);
    double qscale_max = qp2qscale(h->param.rc.i_qp_max);
    int iterations = 0;
    int adj_min, adj_max;
    fills++;
    /* adjust overall stream size */
    do
    {
        iterations++;
        prev_bits = expected_bits;
        if(expected_bits != 0)
        { /* not first iteration */
            /* scale-down factor to give bits back where the buffer overflows */
            adjustment = X264_MAX(X264_MIN(expected_bits / all_available_bits, 0.999), 0.9);
            fills[-1] = rcc->buffer_size * h->param.rc.f_vbv_buffer_init;
            t0 = 0;
            /* fix overflows */
            adj_min = 1;
            while(adj_min && find_underflow(h, fills, &t0, &t1, 1))
            {
                adj_min = fix_underflow(h, t0, t1, adjustment, qscale_min, qscale_max);
                t0 = t1;
            }
        }
        fills[-1] = rcc->buffer_size * (1. - h->param.rc.f_vbv_buffer_init);
        t0 = 0;
        /* fix underflows -- should be done after overflow, as we'd better undersize target than underflowing VBV */
        adj_max = 1;
        while(adj_max && find_underflow(h, fills, &t0, &t1, 0))
            adj_max = fix_underflow(h, t0, t1, 1.001, qscale_min, qscale_max);
        expected_bits = count_expected_bits(h);
        /* stop once close to target or no further progress is being made */
    } while((expected_bits < .995*all_available_bits) && ((int)(expected_bits+.5) > (int)(prev_bits+.5)) );
    if (!adj_max)
        x264_log( h, X264_LOG_WARNING, "vbv-maxrate issue, qpmax or vbv-maxrate too low\n");
    /* store expected vbv filling values for tracking when encoding */
    for(i = 0; i < rcc->num_entries; i++)
        rcc->entry[i].expected_vbv = rcc->buffer_size - fills[i];
    x264_free(fills-1);
}
/* Prepare 2nd-pass rate control: blur pass-1 complexities, binary-search the
 * rate factor that hits the target stream size, run VBV correction, and warn
 * if the resulting curve cannot reach the requested bitrate.
 * Returns 0 on success, -1 if the target bitrate is below the constant cost. */
static int init_pass2( x264_t *h )
{
    x264_ratecontrol_t *rcc = h->rc;
    uint64_t all_const_bits = 0;
    uint64_t all_available_bits = (uint64_t)(h->param.rc.i_bitrate * 1000. * rcc->num_entries / rcc->fps);
    double rate_factor, step, step_mult;
    double qblur = h->param.rc.f_qblur;
    double cplxblur = h->param.rc.f_complexity_blur;
    const int filter_size = (int)(qblur*4) | 1;   /* odd-sized blur kernel */
    double expected_bits;
    double *qscale, *blurred_qscale;
    int i;
    /* find total/average complexity & const_bits */
    for(i=0; i<rcc->num_entries; i++)
    {
        ratecontrol_entry_t *rce = &rcc->entry[i];
        all_const_bits += rce->misc_bits;
    }
    if( all_available_bits < all_const_bits)
    {
        x264_log(h, X264_LOG_ERROR, "requested bitrate is too low. estimated minimum is %d kbps\n",
                 (int)(all_const_bits * rcc->fps / (rcc->num_entries * 1000.)));
        return -1;
    }
    /* Blur complexities, to reduce local fluctuation of QP.
     * We don't blur the QPs directly, because then one very simple frame
     * could drag down the QP of a nearby complex frame and give it more
     * bits than intended. */
    for(i=0; i<rcc->num_entries; i++)
    {
        ratecontrol_entry_t *rce = &rcc->entry[i];
        double weight_sum = 0;
        double cplx_sum = 0;
        double weight = 1.0;
        double gaussian_weight;
        int j;
        /* weighted average of cplx of future frames */
        for(j=1; j<cplxblur*2 && j<rcc->num_entries-i; j++)
        {
            ratecontrol_entry_t *rcj = &rcc->entry[i+j];
            /* frames with more intra MBs break the complexity correlation */
            weight *= 1 - pow( (float)rcj->i_count / rcc->nmb, 2 );
            if(weight < .0001)
                break;
            gaussian_weight = weight * exp(-j*j/200.0);
            weight_sum += gaussian_weight;
            cplx_sum += gaussian_weight * (qscale2bits(rcj, 1) - rcj->misc_bits);
        }
        /* weighted average of cplx of past frames */
        weight = 1.0;
        for(j=0; j<=cplxblur*2 && j<=i; j++)
        {
            ratecontrol_entry_t *rcj = &rcc->entry[i-j];
            gaussian_weight = weight * exp(-j*j/200.0);
            weight_sum += gaussian_weight;
            cplx_sum += gaussian_weight * (qscale2bits(rcj, 1) - rcj->misc_bits);
            weight *= 1 - pow( (float)rcj->i_count / rcc->nmb, 2 );
            if(weight < .0001)
                break;
        }
        rce->blurred_complexity = cplx_sum / weight_sum;
    }
    qscale = x264_malloc(sizeof(double)*rcc->num_entries);
    /* blurred_qscale aliases qscale when no QP blur is requested */
    if(filter_size > 1)
        blurred_qscale = x264_malloc(sizeof(double)*rcc->num_entries);
    else
        blurred_qscale = qscale;
    /* Search for a factor which, when multiplied by the RCEQ values from
     * each frame, adds up to the desired total size.
     * There is no exact closed-form solution because of VBV constraints and
     * because qscale2bits is not invertible, but we can start with the simple
     * approximation of scaling the 1st pass by the ratio of bitrates.
     * The search range is probably overkill, but speed doesn't matter here. */
    expected_bits = 1;
    for(i=0; i<rcc->num_entries; i++)
        expected_bits += qscale2bits(&rcc->entry[i], get_qscale(h, &rcc->entry[i], 1.0, i));
    step_mult = all_available_bits / expected_bits;
    rate_factor = 0;
    /* bisection: grow rate_factor by 'step', backing off when it overshoots */
    for(step = 1E4 * step_mult; step > 1E-7 * step_mult; step *= 0.5)
    {
        expected_bits = 0;
        rate_factor += step;
        rcc->last_non_b_pict_type = -1;
        rcc->last_accum_p_norm = 1;
        rcc->accum_p_norm = 0;
        /* find qscale */
        for(i=0; i<rcc->num_entries; i++)
        {
            qscale[i] = get_qscale(h, &rcc->entry[i], rate_factor, i);
        }
        /* fixed I/B qscale relative to P */
        for(i=rcc->num_entries-1; i>=0; i--)
        {
            qscale[i] = get_diff_limited_q(h, &rcc->entry[i], qscale[i]);
            assert(qscale[i] >= 0);
        }
        /* smooth curve */
        if(filter_size > 1)
        {
            assert(filter_size%2==1);
            for(i=0; i<rcc->num_entries; i++)
            {
                ratecontrol_entry_t *rce = &rcc->entry[i];
                int j;
                double q=0.0, sum=0.0;
                /* Gaussian blur over same-type neighbours within the window */
                for(j=0; j<filter_size; j++)
                {
                    int index = i+j-filter_size/2;
                    double d = index-i;
                    double coeff = qblur==0 ? 1.0 : exp(-d*d/(qblur*qblur));
                    if(index < 0 || index >= rcc->num_entries)
                        continue;
                    if(rce->pict_type != rcc->entry[index].pict_type)
                        continue;
                    q += qscale[index] * coeff;
                    sum += coeff;
                }
                blurred_qscale[i] = q/sum;
            }
        }
        /* find expected bits */
        for(i=0; i<rcc->num_entries; i++)
        {
            ratecontrol_entry_t *rce = &rcc->entry[i];
            rce->new_qscale = clip_qscale(h, rce->pict_type, blurred_qscale[i]);
            assert(rce->new_qscale >= 0);
            expected_bits += qscale2bits(rce, rce->new_qscale);
        }
        if(expected_bits > all_available_bits) rate_factor -= step;
    }
    x264_free(qscale);
    if(filter_size > 1)
        x264_free(blurred_qscale);
    if(rcc->b_vbv)
        vbv_pass2(h);
    expected_bits = count_expected_bits(h);
    /* report if the achievable size is more than 1% off the target */
    if(fabs(expected_bits/all_available_bits - 1.0) > 0.01)
    {
        double avgq = 0;
        for(i=0; i<rcc->num_entries; i++)
            avgq += rcc->entry[i].new_qscale;
        avgq = qscale2qp(avgq / rcc->num_entries);
        if ((expected_bits > all_available_bits) || (!rcc->b_vbv))
            x264_log(h, X264_LOG_WARNING, "Error: 2pass curve failed to converge\n");
        x264_log(h, X264_LOG_WARNING, "target: %.2f kbit/s, expected: %.2f kbit/s, avg QP: %.4f\n",
                 (float)h->param.rc.i_bitrate,
                 expected_bits * rcc->fps / (rcc->num_entries * 1000.),
                 avgq);
        if(expected_bits < all_available_bits && avgq < h->param.rc.i_qp_min + 2)
        {
            if(h->param.rc.i_qp_min > 0)
                x264_log(h, X264_LOG_WARNING, "try reducing target bitrate or reducing qp_min (currently %d)\n", h->param.rc.i_qp_min);
            else
                x264_log(h, X264_LOG_WARNING, "try reducing target bitrate\n");
        }
        else if(expected_bits > all_available_bits && avgq > h->param.rc.i_qp_max - 2)
        {
            if(h->param.rc.i_qp_max < 51)
                x264_log(h, X264_LOG_WARNING, "try increasing target bitrate or increasing qp_max (currently %d)\n", h->param.rc.i_qp_max);
            else
                x264_log(h, X264_LOG_WARNING, "try increasing target bitrate\n");
        }
        else if(!(rcc->b_2pass && rcc->b_vbv))
            x264_log(h, X264_LOG_WARNING, "internal error\n");
    }
    return 0;
}
|
pi_mc_par.c |
#include <stdio.h>
#include <omp.h>
#include "random.h"
static long num_trials = 100000000;

/* Monte Carlo estimate of pi: throw random darts uniformly at the square
 * [-r, r] x [-r, r] and count the fraction landing inside the inscribed
 * circle; that fraction approaches pi/4. RNG comes from random.h. */
int main ()
{
    long i;
    long hits = 0;            /* darts that landed inside the circle */
    double x, y, dist_sq;
    double r = 1.0;           /* radius of circle; side of square is 2*r */
    double start = omp_get_wtime();

    #pragma omp parallel
    {
        #pragma omp single
        printf(" %d threads ",omp_get_num_threads());

        /* per-thread RNG setup over (-r, r), provided by random.h */
        range(-r, r);

        #pragma omp for reduction(+:hits) private(x,y,dist_sq)
        for( i = 0; i < num_trials; i++ )
        {
            x = drandom();
            y = drandom();
            dist_sq = x*x + y*y;
            if( dist_sq <= r*r )
                hits++;
        }
    }

    double pi = 4.0 * ((double)hits/(double)num_trials);
    printf("\n %ld trials, pi is %lf ",num_trials, pi);
    printf(" in %lf seconds\n",omp_get_wtime()-start);
    return 0;
}
|
physics.h | #pragma once
#include "real.h"
#include "botmath.h"
#include "bot.h"
#include <stdlib.h>
#include <string.h>
#include <iostream>
using namespace std;
#define EPS real(0.0001)
#define restitution real(0.9)
/*
 * Simple 2D circle-physics world: gravity, impulse-based circle/circle
 * collisions accelerated by a spatial hash, wall clamping and Euler
 * integration.  All bots are assumed to share one radius (see collide()).
 */
class Physics
{
private:
// Resolve an overlap between circles a and b: push them apart in
// proportion to inverse mass, then apply a restitution impulse along the
// collision normal.  Mutates both a and b in place.
// NOTE(review): collide() visits every ordered pair, so each overlapping
// pair is handed to force() twice per pass — confirm this double
// application is intended (the 0.2 separation damping suggests it may be).
void force(Bot &a, Bot &b)
{
if(&a == &b) return;
vec d = b.pos - a.pos;
//distance between circle centres, squared
real distance_squared = d.lengthSq();
//if they are at the 'same' place, dont do anything (avoids divide-by-zero below)
if(distance_squared < EPS && distance_squared > (-EPS)) return;
//combined radius squared
real radius = a.radius + b.radius;
real radius_squared = radius * radius;
//circles too far apart
if(distance_squared > radius_squared) return;
//distance between circle centres
real distance = sqrt(distance_squared);
//normal of collision
real distinv = real(1) / distance;
vec ncoll = d * distinv;
//penetration distance
real dcoll = (radius - distance);
//separation vec, scaled down (0.2) so penetration is relaxed over several collide() passes
vec separation_vec = ncoll * (dcoll / (a.invmass + b.invmass)) * real(0.2);
//separate the circles
a.pos = a.pos - (separation_vec * a.invmass);
b.pos = b.pos + (separation_vec * b.invmass);
//combined velocity
vec vcoll = b.vel - a.vel;
//impact speed (projection of relative velocity onto the normal)
real vn = vcoll * ncoll;
// objects are moving away. dont reflect velocity
if(vn >= real(0)) return;
//collision impulse
real j = -(real(1) + restitution) * (vn) / (a.invmass + b.invmass);
//collision impulse vec
vec impulse = ncoll * j;
//change momentum of the circles
a.vel = a.vel - (impulse * a.invmass);
b.vel = b.vel + (impulse * b.invmass);
}
// Map a world position to integer grid-cell coordinates.
// NOTE(review): width/cellsize is integer division and the position is
// scaled by (x/width), so coordinates outside [0,width]x[0,height] produce
// out-of-range cells — confirm callers keep bots inside the world.
void getCell(real x, real y, uint64_t &xc, uint64_t &yc)
{
xc = (x/width)*((width/cellsize) + 1);
yc = (y/height)*((height/cellsize) + 1);
}
// Cantor pairing: bijection from (x, y) pairs to a single integer.
uint64_t cantorPair(uint64_t x, uint64_t y)
{
return ((x+y)*(x+y+1))/2 + y;
}
// Bucket index for a grid cell; distinct cells may collide mod hashsize
// (harmless: force() rejects far-apart circles by distance).
uint64_t cellhash(uint64_t x, uint64_t y)
{
return cantorPair(x, y) % hashsize;
}
// Bucket index straight from a world position.
uint64_t hash(real x, real y)
{
uint64_t nx;
uint64_t ny;
getCell(x,y,nx,ny);
return cellhash(nx, ny);
}
// One broad-phase + narrow-phase pass: rebuild the spatial hash (chained
// via Bot::next), then test each bot against bots in its 3x3 cell
// neighbourhood.  cellsize is derived from bots[0], so all bots are
// assumed to share one radius.
void collide(Bot bots[], int nbots)
{
cellsize = bots[0].radius * real(2);
if(hashsize < nbots * 2)
{
// grow the table; calloc zero-fills so every bucket starts empty
hashsize = nbots * 2;
free(hashtable);
hashtable = (Bot**)calloc(hashsize, sizeof(Bot*));
}
else
{
memset(hashtable, 0, hashsize*sizeof(Bot*));
}
// insert every bot at the head of its bucket's chain
for(int i = 0; i < nbots; ++i)
{
Bot *bot = &bots[i];
uint64_t index = hash(bot->pos.x, bot->pos.y);
bot->next = hashtable[index];
hashtable[index] = bot;
}
//for(uint64_t i = 0; i != hashsize; ++i)
//{
//    int sum = 0;
//    for(Bot *curr = hashtable[i]; curr != 0; curr = curr->next)
//    {
//        sum += 1;
//    }
//    if(sum != 0)
//        cerr << i << ": " << sum << endl;
//}
//exit(0);
//#pragma omp parallel for
for(int i = 0; i < nbots; ++i)
{
Bot &bot = bots[i];
uint64_t my_x;
uint64_t my_y;
getCell(bot.pos.x, bot.pos.y, my_x, my_y);
// clamp the 3x3 neighbourhood to the grid edges
uint64_t begin_x = my_x == 0 ? 0 : (my_x - 1);
uint64_t begin_y = my_y == 0 ? 0 : (my_y - 1);
uint64_t end_x = (my_x == (width/cellsize)) ? (width/cellsize) : (my_x + 1);
uint64_t end_y = (my_y == (height/cellsize)) ? (height/cellsize) : (my_y + 1);
for(uint64_t y = begin_y; y <= end_y; ++y)
{
for(uint64_t x = begin_x; x <= end_x; ++x)
{
// walk the chain for this bucket; hash collisions just add
// extra candidates that force() rejects by distance
Bot *other = hashtable[cellhash(x,y)];
for(;other != 0; other = other->next)
{
force(bot, *other);
}
}
}
}
////#pragma omp parallel for
//for(int i = 0; i < nbots; ++i)
//{
//    Bot &bot = bots[i];
//    for(int k = i+1; k < nbots; ++k)
//    {
//        Bot &otherbot = bots[k];
//        force(bot, otherbot);
//    }
//}
}
// Clamp bots inside the [0,width]x[0,height] box, reflecting velocity with
// restitution when they hit an edge.
// NOTE(review): w and h are ints, so width - radius is truncated — confirm
// the sub-pixel clamp loss is acceptable.
void walls(Bot bots[], int nbots)
{
for(int i = 0; i < nbots; ++i)
{
Bot &bot = bots[i];
real radius = bot.radius;
int w = width - radius;
int h = height - radius;
if(bot.pos.x < radius)
{
bot.pos.x = radius;
if(bot.vel.x < 0)
bot.vel.x = -(bot.vel.x * restitution);
}
if(bot.pos.y > h)
{
bot.pos.y = h;
if(bot.vel.y > 0)
bot.vel.y = -(bot.vel.y * restitution);
}
if(bot.pos.x > w)
{
bot.pos.x = w;
if(bot.vel.x > 0)
bot.vel.x = -(bot.vel.x * restitution);
}
if(bot.pos.y < radius)
{
bot.pos.y = radius;
if(bot.vel.y < 0)
bot.vel.y = -(bot.vel.y * restitution);
}
}
}
// Semi-implicit Euler step: accumulate force into velocity, velocity into
// position, then clear the force accumulator.
void advance(Bot bots[], int nbots, real dt)
{
const vec zero;
for(int i = 0; i < nbots; ++i)
{
bots[i].vel = bots[i].vel + bots[i].force * (bots[i].invmass * dt);
bots[i].pos = bots[i].pos + bots[i].vel * dt;
bots[i].force = zero;
}
}
// Apply a constant downward (positive-y) force of magnitude 10 to each bot.
void gravity(Bot bots[], int nbots)
{
for(int i = 0; i < nbots; ++i)
{
Bot &bot = bots[i];
bot.force = bot.force + vec(0, 10);
}
}
Bot **hashtable;    // bucket heads; bots chained through Bot::next
uint64_t hashsize;  // number of buckets, kept >= 2 * nbots
int width;          // world extent, refreshed each tick()
int height;
int cellsize;       // spatial-hash cell side (= 2 * bot radius)
public:
// NOTE(review): no destructor — hashtable is never freed when a Physics
// instance is destroyed.
Physics()
{
cellsize = 20;
hashsize = 100;
hashtable = (Bot**)calloc(hashsize, sizeof(Bot*));
}
// Advance the world one frame: gravity, 10 collision-relaxation passes,
// wall clamping, then integration over dt.
void tick(Bot bots[], int nbots, real dt, int w, int h)
{
width = w;
height = h;
gravity(bots, nbots);
for(int i = 0; i < 10; ++i) {
collide(bots, nbots);
}
walls(bots, nbots);
advance(bots, nbots, dt);
}
};
|
Profiles.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define PI 3.141592
/*Structure for the particles in the original catalog*/
typedef struct Particle {
int label; /*Label of the particle*/
float pos[3]; /*Array with the position of the particle*/
float vel[3]; /*Array with the velocity of the particle*/
} PARTICLE;
/*Structure for the voids/halos*/
typedef struct void_halo {
int label; /*Label of the structure*/
float pos[3]; /*Array with the position of the center of structure*/
float rad; /*Radius of structure*/
int n; /*Number of particles in structure*/
float den; /*Density of structure*/
} VH;
/*Structure for the particles around each void*/
typedef struct Radial{
float dist; /*Distance to the structure center*/
float vr; /*Radial component of the velocity particle*/
} RAD;
/*Returns The mean of vector x*/
/*Weighted average: sum of the N entries of x divided by sum_vol.
  Note the divisor is sum_vol, not N. Returns 0 when N is 0.*/
float mean(float *x, float sum_vol, int N){
	float total;
	int k;
	if(N==0)
		return 0.0;
	total = 0.0;
	for(k=0;k<N;k++)
		total += x[k];
	return total/sum_vol;
}
/*Find correct grid box of a particle*/
/*Find the grid box of a particle: xt[k] = floor(x[k]/Lb), with a coordinate
  exactly at the box edge (index == nd) wrapped periodically back to 0.
  Bug fix: the original wrote "(int) x[0]/Lb" — the cast binds tighter than
  the division, so the coordinate was truncated BEFORE dividing by the box
  size, yielding the wrong cell whenever positions have fractional parts
  relative to Lb (e.g. x=7.5, Lb=2.5 gave cell 2 instead of 3).*/
void indices(float x[], int xt[], float Lb, int nd){
	int k;
	for(k=0;k<3;k++){
		xt[k] = (int)(x[k]/Lb);      /* cast the quotient, not the coordinate */
		if(xt[k]==nd) xt[k] -= nd;   /* periodic wrap at the upper box edge */
	}
}
/*Free the counts*/
/*Release an nd x nd x nd counter array allocated as int*** (innermost rows,
  then each row-of-pointers, then the top-level array).*/
void free_cont(int ***cont, int nd){
	int a, b;
	for(a=0;a<nd;a++){
		for(b=0;b<nd;b++)
			free(cont[a][b]);
		free(cont[a]);
	}
	free(cont);
}
/*Free the grid of particles*/
/*Release the nd x nd x nd grid of particle lists allocated as PARTICLE****
  (free each cell's particle array, then the pointer layers bottom-up).*/
void free_m(PARTICLE ****m, int nd){
	int a, b, c;
	for(a=0;a<nd;a++){
		for(b=0;b<nd;b++){
			for(c=0;c<nd;c++)
				free(m[a][b][c]);
			free(m[a][b]);
		}
		free(m[a]);
	}
	free(m);
}
/*
 * Compute radial density (and optionally radial-velocity) profiles around
 * voids/halos.  Particles are binned into an nd^3 grid so that, for each
 * structure, only nearby grid cells are scanned.  One output record per
 * structure; the loop over structures is parallelized with OpenMP.
 */
int main(int argc,char *argv[])
{
FILE *part, *structures, *out;
char partfile[100], structurefile[100], outfile[100];
/* NOTE(review): np2, n, dist and volume are declared but never used */
int np, np2, nv, i, j, k, a, b , c, n, flag, bin, res, nd, nb, h_vel, Do_log;
int ***cont1, ***cont2, xt[3], N_cores, percent, div;
float dist, n_m, r_times, len_b, L, L_c, Lb, trash, Delta;
float volume;
PARTICLE *p, ****m;
VH *v;
if (argc != 11){
printf("Wrong number of arguments.\n");
printf("arg1: Name of the particles file\n");
printf("arg2: Name of the voids/halos file\n");
printf("arg3: Name of the output file\n");
printf("arg4: Box size\n");
printf("arg5: Catch as many times the radius of the voids/halos\n");
printf("arg6: The number of bins in the range r_times*radius\n");
printf("arg7: The particle catalog also have velocities? Yes (1) or No (0).\n");
printf("arg8: Use the log spacing in radial bins? Yes (1) or No (0).\n");
printf("arg9: Give the number of cores for the parallel computation.\n");
printf("arg10. The mean density contrast of the voids/halos in the catalog.\n\n");
exit(0);
}
sprintf(partfile,"%s", argv[1]);
sprintf(structurefile,"%s", argv[2]);
sprintf(outfile,"%s", argv[3]);
/*read the parameters given by the user*/
r_times = atof(argv[5]); /*How much grows the radius for profile*/
L = atof(argv[4]); /*Box size*/
res = atof(argv[6]); /*The number of bins; NOTE(review): parsed with atof then truncated to int — atoi would be the natural choice*/
h_vel = atoi(argv[7]); /*Catalog have velocities?*/
Do_log = atoi(argv[8]); /*Use the radial bins log spaced*/
N_cores = atoi(argv[9]); /*Number of cores for the parallelization*/
Delta = atof(argv[10]); /*Density contrast of the voids/halos*/
/*Check the velocity option*/
if(h_vel != 1 && h_vel !=0){
printf("You need to say if your particle catalog have velocities! Yes (1) or No (0)?\n");
exit(0);
}
/*Check the bin spacing option*/
if(Do_log != 1 && Do_log != 0){
printf("You need to say if your bins are log spaced! Yes (1) or No (0)?\n");
exit(0);
}
/*Open the particles file*/
part = fopen(partfile,"r");
if (part == NULL) {
printf("Unable to open %s\n",partfile);
exit(0);
}
/*Read the total number of particles and allocate them*/
fscanf(part,"%d", &np);
p = (PARTICLE *)malloc(np*sizeof(PARTICLE));
/*Some parameters of simulation and grid*/
nd = (int)floor(pow(np,1.0/3.0)/2+0.9); /*Number of divisions for the allocation*/
Lb = L/nd; /*Size of each sub-box*/
/* NOTE(review): L_c is read here but never assigned anywhere — this is an
   uninitialized read (undefined behavior).  The resulting nb is dead code:
   nb is recomputed per structure inside the parallel loop below. */
nb = (int)floor(L_c/Lb + 0.99); /*Number of grids that contain L_c*/
n_m = np/(L*L*L); /*The mean density in (Mpc/h)^-3*/
/*Open the voids/halos file*/
structures = fopen(structurefile,"r");
if (structures == NULL) {
printf("Unable to open %s\n",structurefile);
exit(0);
}
/*Allocating the counters: cont1 = particles per cell, cont2 = fill cursor*/
cont1 = (int ***)malloc(nd*sizeof(int **));
cont2 = (int ***)malloc(nd*sizeof(int **));
for(i=0;i<nd;i++){
cont1[i] = (int **)malloc(nd*sizeof(int *));
cont2[i] = (int **)malloc(nd*sizeof(int *));
}
for(i=0;i<nd;i++)
for(j=0;j<nd;j++){
cont1[i][j] = (int *)malloc(nd*sizeof(int));
cont2[i][j] = (int *)malloc(nd*sizeof(int));
}
for(i=0;i<nd;i++)
for(j=0;j<nd;j++)
for(k=0;k<nd;k++){
cont1[i][j][k] = 0;
cont2[i][j][k] = 0;
}
/*Read the position of all particles and determine their sub-box*/
for (i=0;i<np;i++){
p[i].label = i;
for (j=0;j<3;j++)
fscanf(part,"%f", &p[i].pos[j]);
/* velocities are only present (and read) when h_vel == 1 */
for (j=0;j<3 && (h_vel == 1);j++)
fscanf(part,"%f", &p[i].vel[j]);
indices(p[i].pos, xt, Lb, nd);
cont1[xt[0]][xt[1]][xt[2]] += 1;
}
fclose(part);
/*Allocating m: per-cell arrays sized by the first-pass counts*/
m = (PARTICLE ****)malloc(nd*sizeof(PARTICLE ***));
for(i=0;i<nd;i++)
m[i] = (PARTICLE ***)malloc(nd*sizeof(PARTICLE **));
for(i=0;i<nd;i++)
for(j=0;j<nd;j++)
m[i][j] = (PARTICLE **)malloc(nd*sizeof(PARTICLE *));
for(i=0;i<nd;i++)
for(j=0;j<nd;j++)
for(k=0;k<nd;k++)
m[i][j][k] = (PARTICLE *)malloc(cont1[i][j][k]*sizeof(PARTICLE));
/*Saving particles in m (cont2 tracks the next free slot per cell)*/
for(i=0;i<np;i++){
indices(p[i].pos, xt, Lb, nd);
m[xt[0]][xt[1]][xt[2]][cont2[xt[0]][xt[1]][xt[2]]].label = p[i].label;
for(j=0;j<3;j++){
m[xt[0]][xt[1]][xt[2]][cont2[xt[0]][xt[1]][xt[2]]].pos[j] = p[i].pos[j];
if(h_vel == 1) m[xt[0]][xt[1]][xt[2]][cont2[xt[0]][xt[1]][xt[2]]].vel[j] = p[i].vel[j];
}
cont2[xt[0]][xt[1]][xt[2]] +=1;
}
free(p);
/*Read the total number of voids/halos and allocate them*/
fscanf(structures,"%d", &nv);
div = nv/N_cores;
v = (VH *)malloc(nv*sizeof(VH));
for(i=0;i<nv;i++){
fscanf(structures,"%d", &v[i].label);
for (j=0;j<3;j++)
fscanf(structures,"%f", &v[i].pos[j]);
fscanf(structures,"%f", &v[i].rad);
fscanf(structures,"%f", &trash); /* one column of the catalog is discarded */
fscanf(structures,"%d", &v[i].n);
fscanf(structures,"%f", &v[i].den);
}
fclose(structures);
/*Open the output file*/
out = fopen(outfile,"w");
if (out == NULL) {
printf("Unable to open out\n");
exit(0);
}
/*Output header*/
fprintf(out,"%d\n", nv); /*Total number of voids/halos*/
fprintf(out,"%f\n", n_m); /*The mean density of catalog*/
fprintf(out,"%d\n", res); /*Number of bins for each void*/
fprintf(out,"%d\n", Do_log); /*information about the spacing*/
fprintf(out,"%f\n", r_times); /*How much the radius of the voids/halos are get*/
printf("There are %d halos/voids to compute the profile.\n", nv);
percent = 0;
/**************************************/
/*Start to parallelize from this point*/
/**************************************/
omp_set_num_threads(N_cores);
/* NOTE(review): percent is listed private, so each thread starts with an
   indeterminate copy — the percent=0 above does not carry into the region.
   It only drives progress printing, but it is still an uninitialized read. */
#pragma omp parallel for private(i, j, k, a, b, c, L_c, flag, len_b, nb, percent)
/*Calculate the density profile of each halo*/
for (i=0;i<nv;i++){
/*variables used in the main loop*/
float *x_c, *dx, *vels, *vols, r_times_v, distance, log_min, log_dist, log_b;
int x, y, z, *nbins, *x_t, bin, cont;
RAD *r;
/*Skip this void/halo if it has radius zero: emit an all-zero record*/
if(v[i].rad == 0.0){
#pragma omp critical
{
/*Print the relevant void/halos information*/
fprintf(out,"%d %f %d %f\n", v[i].label, v[i].rad, 0, 0.0);
/*Prints the number of particle up to each bin radius*/
for(j=0;j<res;j++){
if(Do_log == 1){
if(h_vel==1) fprintf(out,"%f %d %f\n", 0.0, 0, 0.0);
else fprintf(out,"%f %d\n", 0.0, 0);
}
else{
if(h_vel == 1) fprintf(out,"%f %d %f\n", 0.0, 0, 0.0);
else fprintf(out,"%f %d\n", 0.0, 0);
}
}
}
continue;
}
/*Skip this void/halo if its search radius exceeds half the box*/
if(r_times*v[i].rad > L/2){
#pragma omp critical
{
/*Print the relevant void/halos information*/
fprintf(out,"%d %f %d %f\n", v[i].label, v[i].rad, 0, 0.0);
/*Prints the number of particle up to each bin radius*/
for(j=0;j<res;j++){
if(Do_log == 1){
if(h_vel==1) fprintf(out,"%f %d %f\n", 0.0, 0, 0.0);
else fprintf(out,"%f %d\n", 0.0, 0);
}
else{
if(h_vel == 1) fprintf(out,"%f %d %f\n", 0.0, 0, 0.0);
else fprintf(out,"%f %d\n", 0.0, 0);
}
}
}
continue;
}
/*Array with the possible particles in the profile
  NOTE(review): sized for ALL np particles, per thread — memory heavy*/
r = (RAD *)malloc(np*sizeof(RAD));
/*Allocate the variables*/
x_c = (float *)malloc(3*sizeof(float));
dx = (float *)malloc(3*sizeof(float));
nbins = (int *)malloc(res*sizeof(int));
vols = (float *)malloc(res*sizeof(float));
vels = (float *)malloc(res*sizeof(float));
x_t = (int *)malloc(3*sizeof(int));
/*Bin length in units of the structure radius*/
r_times_v = r_times;
len_b = r_times_v/res;
/*Values needed for the log spacing*/
log_min = log10(len_b);
log_b = (log10(r_times_v) - log_min)/(res-1);
/*Initialize the number, volume and radial velocity of particles in the profile */
for(j=0;j<res;j++){
nbins[j] = 0;
vols[j] = 0.0;
vels[j] = 0.0;
}
/*The center of void/halo*/
for(k=0;k<3;k++)
x_c[k] = v[i].pos[k];
/*Determine the bin of center of the void/halo*/
cont = 0;
indices(x_c, x_t, Lb, nd);
/*How many grid cells the search radius spans*/
nb = (int)floor(r_times_v*v[i].rad/Lb + 0.99);
/*Scan the (2*nb+1)^3 cube of cells around the center, with periodic wrap*/
for(a=-nb;a<=nb;a++)
for(b=-nb;b<=nb;b++)
for(c=-nb;c<=nb;c++){
x = x_t[0] + a;
y = x_t[1] + b;
z = x_t[2] + c;
if(x<0) x += nd; if(x>=nd) x -= nd;
if(y<0) y += nd; if(y>=nd) y -= nd;
if(z<0) z += nd; if(z>=nd) z -= nd;
for(j=0;j<cont1[x][y][z];j++){
/*Minimum-image displacement from the center*/
for (k=0;k<3;k++){
dx[k] = m[x][y][z][j].pos[k] - x_c[k];
if(dx[k] < -L/2.0)
dx[k] = dx[k] + L;
if(dx[k] > L/2.0)
dx[k] = dx[k] - L;
}
distance = sqrt(dx[0]*dx[0] + dx[1]*dx[1] + dx[2]*dx[2]); /*Evaluate the distance beetween the particle and the halo center*/
if(distance < r_times_v*v[i].rad){ /*If the distance is small than r_times times the halo radius get the main information about the particle*/
r[cont].dist = distance;
/*radial velocity = projection of velocity onto the unit separation vector*/
if(h_vel == 1 && distance > 0.0) r[cont].vr = (dx[0]*m[x][y][z][j].vel[0] + dx[1]*m[x][y][z][j].vel[1] + dx[2]*m[x][y][z][j].vel[2])/distance;
cont ++;
}
}
}
/*Determines the bin of each particle (in log or linear spacing) and count this particle*/
for(j=0;j<cont;j++){
if(Do_log == 1){
log_dist = log10(r[j].dist/v[i].rad);
bin = (int)floor((log_dist - log_min)/log_b+0.99);
if(bin<0) bin = 0;
}
else{
bin = (int)floor(r[j].dist/v[i].rad/len_b);
}
nbins[bin] ++;
if(h_vel==1) vels[bin] += r[j].vr;
vols[bin] += 1.0;
}
/*Normalize the weighted velocities (loop is skipped entirely when h_vel!=1)*/
for(j=0;j<res && (h_vel==1);j++){
if(vols[j] == 0.0) vels[j] = 0.0;
else vels[j] = vels[j]/vols[j];
}
/*This flag denotes if this void/halo grows on other halo.
  NOTE(review): flag stays uninitialized when Delta == 1.0 exactly*/
if(Delta > 1.0 && v[i].den > Delta) flag = 1;
else if(Delta > 1.0 && v[i].den <= Delta) flag = 0;
else if(Delta < 1.0 && v[i].den < Delta) flag = 1;
else if(Delta < 1.0 && v[i].den >= Delta) flag = 0;
/*Serialize output: records from different threads must not interleave*/
#pragma omp critical
{
/*Print the relevant void/halos information*/
fprintf(out,"%d %f %d %f\n", v[i].label, v[i].rad, flag, r_times_v);
/*Prints the number of particle up to each bin radius*/
for(j=0;j<res;j++){
if(Do_log == 1){
if(h_vel==1) fprintf(out,"%f %d %f\n", pow(10, log_min + j*log_b), nbins[j], vels[j]);
else fprintf(out,"%f %d\n", pow(10, log_min + j*log_b), nbins[j]);
}
else{
if(h_vel == 1) fprintf(out,"%f %d %f\n", (j+1)*len_b, nbins[j], vels[j]);
else fprintf(out,"%f %d\n", (j+1)*len_b, nbins[j]);
}
}
}
free(dx);
free(x_c);
free(x_t);
free(nbins);
free(vels);
free(vols);
free(r);
/*Coarse progress report (approximate under parallel scheduling)*/
if(i<div && percent < i*100/div){
percent = i*100/div;
printf("%d%% of the halos/voids profiles computed\n", percent);
}
}
/**************************/
/*Stop the parallelization*/
/**************************/
fclose(out);
/*Free the memory*/
free(v);
free_cont(cont1, nd);
free_cont(cont2, nd);
free_m(m, nd);
return 0;
}
|
quantized_conv2d.h | /* Copyright 2018 The Blueoil Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
#define DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
#include <vector>
#include <memory>
#include <stdexcept>
#include "tensor_view.h"
#include "tensor_convert.h"
#include "operators.h"
#include "time_measurement.h"
#include "func/impl/quantized_conv2d_tiling.h"
#include "func/impl/quantized_conv2d_kn2row.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// Dispatch a quantized 2D convolution to the build-target backend
// (FPGA TCA, NEON/AVX tiling, or generic kn2row).  The input tensor is first
// repacked into the backend's preferred layout via convert_tensor.
// Only 3x3/pad=1 and 1x1/pad=0 kernels are supported; anything else throws
// std::invalid_argument.
// NOTE(review): p.device_output_buf is allocated here when null and never
// freed in this header — confirm ownership is taken by the caller.
template <typename T, MemoryLayout layout>
void QuantizedConv2D(const TensorView<T, layout>& input,
    const kernel_t& kernel,
    binary_convolution_parameters p) {
  Measurement::Start("QuantizedConv2D");
  constexpr T_UINT TilingInTypeBitWidth = dlk::impl::tiling_input_elem_t::BitCount;
  T_UINT kh = p.normal_conv_params.kernel_height;
  T_UINT kw = p.normal_conv_params.kernel_width;
  T_UINT padding = p.normal_conv_params.padding;
  T_UINT ih = p.normal_conv_params.input_height;
  T_UINT iw = p.normal_conv_params.input_width;
  T_UINT ic = p.normal_conv_params.kernel_depth;
  T_UINT oc = p.normal_conv_params.output_channels;
  // output buffer sized for the supported same-padding cases (output spatial
  // size == input spatial size); value-initialized to zero
  auto size = oc * ih * iw;
  if (p.device_output_buf == nullptr)
    p.device_output_buf = new BIN_CONV_OUTPUT[size]();
  if ((kh == 3 && kw == 3 && padding == 1) ||
      (kh == 1 && kw == 1 && padding == 0)) {
#ifdef RUN_ON_FPGA
    // FPGA path: channels packed into QUANTIZED_PACKED words (rounded up)
    dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = {
      (ic + QUANTIZED_PACKED::BitCount - 1) / QUANTIZED_PACKED::BitCount,
      ih,
      iw,
      p.bin_input_bitwidth,
      QUANTIZED_PACKED::BitCount
    };
    dlk::impl::kn2row_input_t tmp(p.device_input_buf, shape);
    Measurement::Start("Tensor convert");
    convert_tensor(input, tmp);
    Measurement::Stop();
    dlk::impl::TCAConv2d(tmp, kernel, p);
#elif defined USE_NEON || defined USE_AVX
    // SIMD path: tiling layout; note ic is assumed divisible by the tile width
    dlk::impl::tiling_input_t::tensor_info_t<std::size_t> shape = {
      ic / TilingInTypeBitWidth,
      ih,
      iw,
      p.bin_input_bitwidth,
      TilingInTypeBitWidth
    };
    dlk::impl::tiling_input_t tmp(p.device_input_buf, shape);
    Measurement::Start("Tensor convert");
    convert_tensor(input, tmp);
    Measurement::Stop();
    dlk::impl::QuantizedConv2DTiling(tmp, kernel, p);
#else
    // generic fallback: kn2row with HWC-packed input
    dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = {
      ih,
      iw,
      ic / QUANTIZED_PACKED::BitCount,
      p.bin_input_bitwidth,
      QUANTIZED_PACKED::BitCount
    };
    dlk::impl::kn2row_input_t tmp(p.device_input_buf, shape);
    Measurement::Start("Tensor convert");
    convert_tensor(input, tmp);
    Measurement::Stop();
    dlk::impl::QuantizedConv2DKn2Row(tmp, kernel, p);
#endif
  } else {
    throw std::invalid_argument("Unsupported convolution parameter");
  }
  Measurement::Stop();
}
// Run the quantized convolution, then de-quantize into a float NHWC output
// by applying one scalar scaling factor to every channel.
// The device buffer is read in a channel-blocked layout (blocks of b=32
// channels; block index s is the slowest dimension) — assumed to match what
// the backend wrote; TODO(review) confirm against each backend's output layout.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2D(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);
  Measurement::Start("QuantizedConv2D_ApplyScalingFactor");
  // NOTE(review): out_elems is computed but never used
  unsigned out_elems = p.normal_conv_params.output_height *
    p.normal_conv_params.output_width *
    p.normal_conv_params.output_channels;
  // temporary: (2^n - 1) * (max - min)
  const T_FLOAT post_qtz_factor = 2.0f / 3.0f;
  int b = 32; // channel-block width of the device output buffer
  auto &ncp(p.normal_conv_params);
  // output may carry fewer channels than the padded device buffer
  auto true_out_channels = output.get_shape()[3];
  auto channel_blocks = (true_out_channels + b - 1) / b;
  int out_index = 0;
  // walk the NHWC output in order while gathering from the blocked buffer;
  // the last block is clipped to the true channel count
  for (int h = 0; h < ncp.output_height; ++h)
    for (int w = 0; w < ncp.output_width; ++w)
      for (int s = 0; s < channel_blocks; ++s)
        for (int d = 0; d < std::min(b, (int)true_out_channels - s*b); ++d)
          output.data()[out_index++] = (scaling_factor * post_qtz_factor) * p.device_output_buf[h * (b * ncp.output_width) + w * b + s * (ncp.output_height * ncp.output_width * b) + d];
  Measurement::Stop();
}
// Run the quantized convolution, then de-quantize into a float NHWC output
// using a per-channel scaling factor array (scaling_factor[channel]).
// Same blocked-buffer traversal as the scalar-factor overload above.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2D(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  QuantizedConv2D(input, kernel, p);
  // NOTE(review): out_elems and out_channels are computed but never used
  unsigned out_elems =
    p.normal_conv_params.output_height * p.normal_conv_params.output_width;
  unsigned out_channels = p.normal_conv_params.output_channels;
  int b = 32; // channel-block width of the device output buffer
  auto& ncp(p.normal_conv_params);
  // output may carry fewer channels than the padded device buffer
  auto true_out_channels = output.get_shape()[3];
  auto channel_blocks = (true_out_channels + b - 1) / b;
  // temporary: (2^n - 1) * (max - min)
  T_FLOAT post_qtz_factor = 2.0 / 3.0;
  Measurement::Start("QuantizedConv2D_ApplyScalingFactor");
  int out_index = 0;
  // s*b + d reconstructs the true channel index for the per-channel factor
  for (int h = 0; h < ncp.output_height; ++h)
    for (int w = 0; w < ncp.output_width; ++w)
      for (int s = 0; s < channel_blocks; ++s)
        for (int d = 0; d < std::min(b, (int)true_out_channels - s*b); ++d)
          output.data()[out_index++] = (scaling_factor[s*b + d] * post_qtz_factor) * p.device_output_buf[h * (b * ncp.output_width) + w * b + s * (ncp.output_height * ncp.output_width * b) + d];
  Measurement::Stop();
}
// Thresholded variant with packed output: the backend already applied
// thresholding, so the result is copied verbatim from the device buffer into
// the packed output tensor (scaling_factor is unused here).
// bytes assumes out_elems is a multiple of 8 (bit-packed data).
template<typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);
  unsigned out_elems = p.normal_conv_params.output_height *
    p.normal_conv_params.output_width *
    p.normal_conv_params.output_channels;
  const auto bytes = out_elems / 8 * p.n_bit;
  Measurement::Start("Memcpy");
#ifdef _OPENMP
  // split the copy into one contiguous chunk per thread
  const int num_blocks = bytes / sizeof(QUANTIZED_PACKED);
  const int num_threads = omp_get_max_threads();
  const int chunk_size = (num_blocks + num_threads - 1) / num_threads;
#pragma omp parallel for
  for (int i = 0; i < num_blocks; i += chunk_size) {
    // the final chunk may be shorter than chunk_size
    memcpy(output.data() + i,
        (QUANTIZED_PACKED*)(p.device_output_buf) + i,
        std::min(chunk_size, num_blocks - i) * sizeof(QUANTIZED_PACKED));
  }
#else
  memcpy(output.data(), (void*)p.device_output_buf, bytes);
#endif
  Measurement::Stop();
}
// Thresholded variant with float output: the device buffer holds n_bit
// bit-planes per spatial position; for each output channel d the d-th bit of
// each plane is gathered, the bits are reassembled into an integer code, and
// the code is rescaled to [0, p.max_value].  scaling_factor is unused.
// Assumes true_out_channels fits within one QUANTIZED_PACKED::base_t word —
// TODO(review) confirm for wide layers.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);
  Measurement::Start("linear_to_float");
  // maximum representable quantized code: 2^n_bit - 1
  T_FLOAT n = (1 << p.n_bit) - 1;
  const auto& np = p.normal_conv_params;
  const auto out_height = np.output_height;
  const auto out_width = np.output_width;
  const auto out_channels = np.output_channels;
  const auto true_out_channels = output.get_shape()[3];
  QUANTIZED_PACKED::base_t* ptr = (QUANTIZED_PACKED::base_t*)p.device_output_buf;
  for (unsigned r = 0; r < out_height; ++r) {
    for (unsigned c = 0; c < out_width; ++c) {
      for (unsigned d = 0; d < true_out_channels; ++d) {
        // i indexes the first bit-plane word for position (r, c)
        const auto i = r * out_width * p.n_bit + c * p.n_bit;
        QUANTIZED_PACKED::base_t bits = 0;
        // gather channel d's bit from each of the n_bit planes
        for (unsigned digit = 0; digit < p.n_bit; ++digit) {
          bits |= ((ptr[i + digit] >> d) & 1) << digit;
        }
        // rescale the integer code to [0, max_value]
        T_FLOAT tmp = (T_FLOAT)bits;
        tmp = tmp / n;
        output(0, r, c, d) = tmp * p.max_value;
      }
    }
  }
  Measurement::Stop();
}
// Per-channel-factor overload (packed output): forwards to the scalar-factor
// version using only scaling_factor[0] — the remaining factors are ignored
// because thresholding already folded scaling into the packed result.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor[],
    const binary_convolution_parameters& p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0],
      p);
}
// Per-channel-factor overload (float output): forwards to the scalar-factor
// version using only scaling_factor[0]; the scalar version ignores the
// factor anyway (values are rescaled via p.max_value).
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0],
      p);
}
#endif // DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <signal.h>
#include <ctype.h>
#include "common.h"
#include "turing.h"
#include "evolve_turing.h"
#include "pqueue.h"
#define TAPE_LEN 1000
/*
 * Hand-written demo Turing machine that bubble-sorts a tape over the symbols
 * {BLANK, 1, 2, 3}: it scans right remembering the last symbol, and when a
 * smaller symbol follows a larger one it swaps them, then rewinds to the
 * blank and restarts until the tape is sorted.
 * Used as a known-good fitness reference for the evolved machines.
 */
typedef enum {
start, was_1, was_2, was_3, s2_1, s3_1, s3_2, was_2_swap, was_3_swap, search_blank, error, end
} tDemoBubbleStates;
/* One row per state, one entry per input symbol: {next state, symbol to
   write, head movement}.  E presumably means "leave the symbol unchanged"
   and N/L/R/RR the head moves — confirm against turing.h. */
tTransTableItem demoBubbleTable[]={
// 0=start: BLANK 1 2 3
{end, E, N}, {was_1, E, R}, {was_2, E, R}, {was_3, E, R} ,
// 1=was_1: BLANK 1 2 3
{end, E, N}, {was_1, E, R}, {was_2, E, R}, {was_3, E, R} ,
// 2=was_2: BLANK 1 2 3  (a 1 after a 2 is out of order -> start a swap)
{end, E, N}, { s2_1, 2, L}, {was_2, E, R}, {was_3, E, R} ,
// 3=was_3: BLANK 1 2 3  (a 1 or 2 after a 3 is out of order -> start a swap)
{end, E, N}, { s3_1, 3, L}, { s3_2, 3, L}, {was_3, E, R} ,
// 4=s2_1: BLANK 1 2 3  (write the 1 back over the 2, jump right twice)
{error, E, N}, {error, E, N}, {was_2_swap, 1, RR} , {error, E, N},
// 5=s3_1: BLANK 1 2 3
{error, E, N}, {error, E, N}, {error, E, N}, {was_3_swap, 1, RR} ,
// 6=s3_2: BLANK 1 2 3
{error, E, N}, {error, E, N}, {error, E, N}, {was_3_swap, 2, RR} ,
// 7=was_2_swap:BLANK 1 2 3  (after a swap, rewind on blank to restart the pass)
{search_blank, E, L}, { s2_1, 2, L}, {was_2_swap, E, R}, {was_3_swap, E, R} ,
// 8=was_3_swap:BLANK 1 2 3
{search_blank, E, L}, { s3_1, 3, L}, { s3_2, 3, L}, {was_3_swap, E, R} ,
// 9=search_blank: BLANK 1 2 3  (walk left to the leading blank, then restart)
{start, E, R}, {search_blank, E, L}, {search_blank, E, L}, {search_blank, E, L}
// 10=error (final state)
// 11=end (final state)
};
#define DEMOBUBBLE_SYMBOLS 4
/* Packaged machine: state count is derived from the table size so the two
   cannot drift apart. */
tTransitions demoBubble = {
sizeof(demoBubbleTable)/sizeof(tTransTableItem)/DEMOBUBBLE_SYMBOLS,
//=10, // nr. of states
DEMOBUBBLE_SYMBOLS, // nr. of symbols
demoBubbleTable // transition table
};
/* Fixed sample tapes (from common.h/turing.h) used to score sorting fitness;
   the compound-literal sizeof yields each tape's length including its
   terminator. */
tTape Sample_tapes[] = {
{SAMPLE_TAPE1, sizeof((char[])SAMPLE_TAPE1)},
{SAMPLE_TAPE2, sizeof((char[])SAMPLE_TAPE2)},
{SAMPLE_TAPE3, sizeof((char[])SAMPLE_TAPE3)},
};
/* Print the command-line usage text for progname and terminate the program
 * with a success status.  Never returns. */
void help_exit(char * progname) {
    fprintf(stdout,
        "%s [-b NR_OF_BESTS] [-k NR_OF_KIDS] [-o OUTPUT] [-p POPULATION_SIZE] "
        "[-s STATES] [-y SYMBOLS]\nwhere:\n"
        "-b BEST_CNT\n sets the number of best individuals, who are evolved. Default is 5000\n"
        "-d DEGENARTION_CNT\n if this number generations has no success, then the evolution is restarted. Default is 500\n"
        "-k KIDS_CNT\n sets the number of kids of the best individual. Default is 10\n"
        "-p POPULATION_SIZE\n sets the population size. Default value is 10000\n"
        "-s STATES\n sets the number of Turing machine states. Default value is 12\n"
        "-y SYMBOLS\n sets the number of Turing machine symbols. Default value is 4\n"
        "-o OUTPUT\n output directory. Default is \"output\"\n",
        progname);
    exit(EXIT_SUCCESS);
}
/**
* @TODO Add options for:
* - sample tapes. They would be part of tParams type. Result=lots of simplification
* - importing good individuals for starting evolution where we ended
*/
/* Parse command-line options into params.  A "-x" token selects which field
 * the NEXT bare token is stored into; "-o" takes a string, all others take an
 * integer.  Unknown flags or non-numeric values print usage and exit.  A bare
 * leading number (no flag yet) sets the population size, matching the
 * original default mode.  Prints the resulting parameter set when done. */
void get_options(int argc, char ** argv, tParams * params) {
    enum {best, degeneration, kids, output, popul_size, states, symbols} mode = popul_size;
    int idx;
    for (idx = 1; idx < argc; idx++) {
        char *token = argv[idx];
        if (token[0] == '-') {
            /* flag token: remember which field the next value belongs to */
            switch (token[1]) {
                case 'b': mode = best; break;
                case 'd': mode = degeneration; break;
                case 'k': mode = kids; break;
                case 'o': mode = output; break;
                case 'p': mode = popul_size; break;
                case 's': mode = states; break;
                case 'y': mode = symbols; break;
                default:
                    help_exit(argv[0]);
            }
            continue;
        }
        /* value token: "-o" takes the string as-is, everything else a number */
        if (mode == output) {
            params->output = token;
            continue;
        }
        {
            char *tail;
            long num = strtol(token, &tail, 10);
            if (tail == token) help_exit(argv[0]);
            switch (mode) {
                case popul_size: params->population_size = num; break;
                case symbols: params->symbols = num; break;
                case states: params->states = num; break;
                case kids: params->kids_cnt = num; break;
                case best: params->best_cnt = num; break;
                case degeneration: params->degeneration_cnt = num; break;
                default: ;
            }
        }
    }
    printf("Parameters: population size=%d, states=%d, symbols=%d, best_cnt=%d, kids_cnt=%d, degeneration_cnt=%d\n",
    params->population_size, params->states, params->symbols, params->best_cnt, params->kids_cnt, params->degeneration_cnt);
}
tParams params={10000, 12, 4, 5000, 10, 1000, "output"};
volatile int log_level=LOG_NONE_0;
/* SIGINT handler: pauses logging, shows the current evolution parameters and
 * lets the user adjust log level / best_cnt / degeneration_cnt / kids_cnt
 * interactively, continue unchanged ('c'), or exit (anything else).
 * NOTE(review): printf/fgets/exit are not async-signal-safe, and the handler
 * mutates the global params while worker threads may be reading it — accepted
 * here as an interactive debugging convenience, but worth confirming. */
void sighandler(int sig)
{
int val, old_log_level=log_level;
log_level=LOG_NONE_0; // disable logging while waiting for user input...
char line[255];
signal(SIGINT, &sighandler); // reestablish this as signal handler
printf("\nCurrent parameters: log_level=%d, population size=%d, states=%d, symbols=%d,\n"
"best_cnt=%d, kids_cnt=%d, degeneration_cnt=%d\n",
old_log_level, params.population_size, params.states, params.symbols,
params.best_cnt, params.kids_cnt, params.degeneration_cnt);
printf("Enter <0..3> as log_level | [b BEST_CNT] | [d DEGENERATION_CNT] [k KIDS_CNT], 'c' for continue, anything else for exit:\n");
if (fgets(line, 255, stdin)!=NULL) {
// a leading digit sets the log level directly
if (isdigit(line[0])) {
val=atoi(line);
if (val<0 || val>3)
fprintf(stderr, "Log level must be in range <0,3>!\n");
else log_level=val;
} else {
// otherwise the first char selects a parameter, the rest is its value
val=atoi(line+1);
switch (tolower(line[0])) {
case 'b': params.best_cnt=val; break;
case 'd': params.degeneration_cnt=val; break;
case 'k':
// kids_cnt must stay below best_cnt or the evolution loop breaks
if (val <= params.best_cnt)
params.kids_cnt = val;
else fprintf(stderr, "KIDS_CNT must be < best_cnt=%d\n", params.best_cnt);
break;
case 'c': break;
default: exit(EXIT_SUCCESS);
}
// restore logging after a parameter change or 'c'
log_level=old_log_level;
}
}
printf("New parameters: log_level=%d, population size=%d, states=%d, symbols=%d,\n"
"best_cnt=%d, kids_cnt=%d, degeneration_cnt=%d\n",
log_level, params.population_size, params.states, params.symbols,
params.best_cnt, params.kids_cnt, params.degeneration_cnt);
}
/* Entry point: parse options, score the hand-written demoBubble machine on
 * the sample tapes as a reference, then run the evolutionary search on all
 * available CPUs (one OpenMP thread per core). */
int main(int argc, char **argv) {
// scratch buffer for the fitness evaluator's log output
// NOTE(review): "log" shadows the math function log() from <math.h>
char log[20000];
int cpus=omp_get_num_procs();
int n=sizeof(Sample_tapes)/sizeof(tTape);
tTapeMetrics metrics[n]; // VLA, but n is a small compile-time-known count
signal(SIGINT, &sighandler);
get_options(argc, argv, &params);
calc_all_tapes_metrics(Sample_tapes, metrics, n);
printf("Using CPUs=%d\n", cpus);
//log_level=LOG_ALL_2;
// baseline: fitness of the known-good bubble-sort machine
eval_sorting_fitness_n_tapes(&demoBubble, Sample_tapes, n, log);
// each thread runs its own evolution loop over the shared params/tapes
#pragma omp parallel num_threads(cpus)
evolve_turing(&params, Sample_tapes, n);
return 0;
}
|
dvg.c | #include <mpi.h>
extern int *cn_c;
extern int *ce_c;
extern int *ec_c;
extern int *cn_crem;
extern int *ce_crem;
extern int *ec_crem;
extern int *neighbor_map;
extern int *cedge_map;
extern int *ecell_map;
extern int *neighbor_maprem;
extern int *cedge_maprem;
extern int *ecell_maprem;
extern GVAL **neighbor_2Dbuf;
extern GVAL **neighbor_3Dbuf;
extern GVAL **cedge_2Dbuf;
extern GVAL **cedge_3Dbuf;
extern GVAL **ecell_2Dbuf;
extern GVAL **ecell_3Dbuf;
extern GVAL **neighbor_2Dbufrem;
extern GVAL **neighbor_3Dbufrem;
extern GVAL **cedge_2Dbufrem;
extern GVAL **cedge_3Dbufrem;
extern GVAL **ecell_2Dbufrem;
extern GVAL **ecell_3Dbufrem;
extern MPI_Request *mpi_send_requests;
extern MPI_Request *mpi_recv_requests;
extern int comm_tag;
#include "grid.h"
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_grad;
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_dvg;
/*
 * dvg: compute the divergence field gv_dvg from the gradient field gv_grad.
 *
 * Phase 1: exchange halo ("remote") cell-edge values of gv_grad with every
 *          other MPI rank via nonblocking send/recv pairs.
 * Phase 2: for each locally owned block/height/cell, accumulate the
 *          edge-weighted sum of gv_grad over the NBRS neighboring edges.
 */
void dvg(GRID * g)
{
	{
		{
			/* Bump the tag so this exchange cannot match messages from a
			 * previous communication round. */
			comm_tag++;
			for (int pn = 0; pn < g->mpi_world_size; pn++) {
				if (pn != g->mpi_rank) {
					/* ce_crem is a prefix sum: the entries destined for rank pn
					 * occupy [ce_crem[pn-1], ce_crem[pn]).  Pack the gv_grad
					 * values rank pn needs into the per-rank send buffer.
					 * cedge_maprem stores (block, index) pairs, hence the *2 stride. */
					for (int i = 0; i < (ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)); i++) {
						for (int k = 0; k < g->height; k++)
							cedge_3Dbufrem[pn][g->height * i + k] = gv_grad->data_pointer.p3[cedge_maprem[(pn ? ce_crem[pn - 1] * 2 : 0) + 2 * i]][k][cedge_maprem[(pn ? ce_crem[pn - 1] * 2 : 0) + 2 * i + 1]];
					}
					/* NOTE(review): data is transferred as MPI_FLOAT — assumes
					 * GVAL is float; TODO confirm against grid.h. */
					MPI_Isend(cedge_3Dbufrem[pn], (ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)) * g->height, MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_send_requests[pn]);
					MPI_Irecv(cedge_3Dbuf[pn], (ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * g->height, MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
				}
			}
			/* NOTE(review): waits on 2*world_size requests starting at
			 * mpi_send_requests — this assumes mpi_recv_requests is allocated
			 * contiguously right after mpi_send_requests, and that the slots
			 * for pn == mpi_rank are pre-initialized to MPI_REQUEST_NULL
			 * (they are never filled above).  TODO confirm both at the
			 * allocation site. */
			MPI_Waitall(g->mpi_world_size * 2, mpi_send_requests, MPI_STATUSES_IGNORE);
			/* Unpack received halo values into local gv_grad slots.
			 * cedge_map stores 5-tuples per entry; fields 3 and 4 hold the
			 * destination (block, index) pair. */
			for (int pn = 0; pn < g->mpi_world_size; pn++) {
				if (pn != g->mpi_rank) {
					for (int i = 0; i < (ce_c[pn] - (pn ? ce_c[pn - 1] : 0)); i++) {
						for (int k = 0; k < g->height; k++)
							gv_grad->data_pointer.p3[cedge_map[(pn ? ce_c[pn - 1] * 5 : 0) + 5 * i + 3]][k][cedge_map[(pn ? ce_c[pn - 1] * 5 : 0) + 5 * i + 4]] = cedge_3Dbuf[pn][g->height * i + k];
					}
				}
			}
		}
		/* Range of cell blocks owned by this rank, from a static split of
		 * g->cBlkCnt blocks into ceil(cBlkCnt/world_size)-sized chunks.
		 * (Looks machine-generated from a local-range macro.) */
		size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
		size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
		/* Divergence: per cell, the weighted sum of gv_grad over its NBRS
		 * edges.  Blocks are independent, so the outer loop parallelizes. */
#pragma omp parallel for
		for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
			for (size_t height_index = (0); height_index < (g->height); height_index++) {
				for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
					gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)] = 0;
					for (int n = 0; n < NBRS; n++) {
						/* cEdgeBlk/cEdgeIdx map (block, cell, edge n) to the
						 * (block, index) of the gradient value on that edge. */
						gv_dvg->data_pointer.p3[(block_index)][(height_index)][(cell_index)] += g->edge_weights[n][cell_index % BLKSIZE] * gv_grad->data_pointer.p3[(g->cEdgeBlk[(n)]->data_pointer.p2[(block_index)][(cell_index)])][(height_index)][(g->cEdgeIdx[(n)]->data_pointer.p2[(block_index)][(cell_index)])];
					}
				}
			}
		}
	}
}
|
conv4D_impl_CPU.c | #include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <time.h>
#include "conv4D_impl.h"
/*
 * Naive serial reference implementation of the 4-D convolution.
 *
 * layer  - kernel weights, per-output-channel bias, and stride
 * input  - input feature map, flattened with channel-fastest layout
 * output - output feature map; fully overwritten (zeroed, then accumulated)
 *
 * Returns a conv_ret whose time_elapsed holds the CPU seconds spent.
 */
conv_ret conv4d_convolve_serial_naive(conv4d_layer layer, featuremap_3d input, featuremap_3d output)
{
	conv_ret ret;
	const clock_t t_begin = clock();

	//The kernel accumulates into the output, so it must start zeroed.
	memset(output.data, 0, featuremap_3d_size(output) * sizeof(float));

	//Loop order kept identical to the reference (n, q, p, s, r, c, m) so the
	//floating-point accumulation order — and therefore the result — matches.
	for (size_t n = 0; n < output.batches; n++)
		for (size_t q = 0; q < output.height; q++)
			for (size_t p = 0; p < output.width; p++)
				for (size_t s = 0; s < layer.kernel_height; s++)
					for (size_t r = 0; r < layer.kernel_width; r++)
						for (size_t c = 0; c < input.channels; c++)
							for (size_t m = 0; m < output.channels; m++)
							{
								//Flattened offsets in Horner form; these evaluate to the
								//same values as the expanded stride sums.
								const size_t in_at = ((n * input.height + (layer.stride_size * q + s)) * input.width
										+ (layer.stride_size * p + r)) * input.channels + c;
								const size_t out_at = ((n * output.height + q) * output.width + p) * output.channels + m;
								const size_t w_at = ((s * layer.kernel_width + r) * layer.input_channels + c) * layer.output_channels + m;
								output.data[out_at] += input.data[in_at] * layer.weights[w_at];
							}

	//Add the per-channel bias to every output element.
	for (size_t n = 0; n < output.batches; n++)
		for (size_t q = 0; q < output.height; q++)
			for (size_t p = 0; p < output.width; p++)
				for (size_t m = 0; m < output.channels; m++)
					*(featuremap_3d_addr_of(output, n, m, p, q)) += layer.bias[m];

	const clock_t t_end = clock();
	ret.time_elapsed = (double)(t_end - t_begin) / CLOCKS_PER_SEC;
	return ret;
}
/*
 * Cache-blocked serial 4-D convolution.
 * Computes the same result as conv4d_convolve_serial_naive, but tiles the
 * batch (n), output-row (q) and kernel-row (s) loops with tiles of
 * block_size elements to improve locality.  block_size must be >= 1.
 */
conv_ret conv4d_convolve_serial_optimized(conv4d_layer layer, featuremap_3d input, featuremap_3d output, const size_t block_size)
{
	//Benchmarking setup
	conv_ret ret;
	clock_t start_t, end_t;
	start_t = clock();
	//Reset memory (output is accumulated into below)
	memset(output.data, 0, featuremap_3d_size(output) * sizeof(float));
	//Convolve
	//n, q, s receive the de-blocked indices assigned inside the loop conditions.
	size_t n, s, q;
	for (size_t n0 = 0; n0 < output.batches; n0 += block_size)
	for (size_t q0 = 0; q0 < output.height; q0 += block_size)
	for (size_t s0 = 0; s0 < layer.kernel_height; s0+=block_size)
	for (size_t p = 0; p < output.width; p++)
	for (size_t r = 0; r < layer.kernel_width; r++)
	for (size_t c = 0; c < input.channels; c++)
	for (size_t m = 0; m < output.channels; m++)
	//Blocking over n, q, and s (batch, output row, kernel row).
	//The (n=n0+n1) assignments in the conditions both advance the de-blocked
	//index and clip the final partial tile at the true loop bound.
	for (size_t n1 = 0; n1 < block_size && (n=n0+n1) < output.batches; n1++)
	for (size_t q1 = 0; q1 < block_size && (q=q0+q1) < output.height; q1++)
	for (size_t s1 = 0; s1 < block_size && (s=s0+s1) < layer.kernel_height; s1++){
		//Flattened channel-fastest offsets: input (n, q*stride+s, p*stride+r, c),
		//output (n, q, p, m), weights (s, r, c, m).
		size_t i_index = n * input.channels * input.width * input.height
			+ (layer.stride_size * q + s) * input.channels * input.width
			+ (layer.stride_size * p + r) * input.channels
			+ c;
		size_t o_index = n * output.channels * output.width * output.height
			+ q * output.channels * output.width
			+ p * output.channels
			+ m;
		size_t f_index = s * layer.output_channels * layer.input_channels * layer.kernel_width
			+ r * layer.output_channels * layer.input_channels
			+ c * layer.output_channels
			+ m;
		float i = input.data[i_index];
		float f = layer.weights[f_index];
		output.data[o_index] += i * f;
		//printf("%I32d %zu %zu %zu\n", iteration++, i_index, f_index, o_index);
	}
	//Bias: one pass adding the per-channel bias to every output element.
	for (size_t n = 0; n < output.batches; n++)
	for (size_t q = 0; q < output.height; q++)
	for (size_t p = 0; p < output.width; p++)
	for (size_t m = 0; m < output.channels; m++)
		*(featuremap_3d_addr_of(output, n, m, p, q)) += layer.bias[m];
	//End benchmarking
	end_t = clock();
	ret.time_elapsed = (double)(end_t - start_t) / CLOCKS_PER_SEC;
	return ret;
}
#ifdef THREAD_SUPPORT
#include <pthread.h>
size_t iteration = 0;
pthread_mutex_t* mutex = NULL; //To prevent multiple threads from modifying threadID
pthread_t threads[THREAD_SUPPORT];
conv4d_layer g_t_layer;
featuremap_3d g_t_input, g_t_output;
//Thread function to process a single output channel in "o" from "i" with "l"
//Worker thread for conv4d_convolve_threads_naive.
//Each thread repeatedly claims one output row (height index q) from the
//shared counter "iteration" and computes the convolution plus bias for that
//row.  Rows are disjoint across threads, so the output needs no locking.
void * conv4d_convolve_threads_naive_helper(void* arg) {
	(void)arg; //unused
	while (1) {
		//Atomically claim the next unprocessed output row.
		pthread_mutex_lock(mutex);
		size_t q = iteration++;
		pthread_mutex_unlock(mutex);
		//Stop once every output row has been handed out.
		//Bug fix: the original compared q against g_t_output.channels, but q
		//indexes the HEIGHT dimension (see its use in i_index/o_index below),
		//so the check either skipped rows or walked out of bounds whenever
		//channels != height.
		if (q >= g_t_output.height) {
			break;
		}
		//Convolution for row q only (same loop order as the serial reference
		//with the q loop hoisted out as the unit of work).
		for (size_t n = 0; n < g_t_output.batches; n++)
			for (size_t p = 0; p < g_t_output.width; p++)
				for (size_t s = 0; s < g_t_layer.kernel_height; s++)
					for (size_t r = 0; r < g_t_layer.kernel_width; r++)
						for (size_t c = 0; c < g_t_input.channels; c++)
							for (size_t m = 0; m < g_t_output.channels; m++)
							{
								size_t i_index = n * g_t_input.channels * g_t_input.width * g_t_input.height
									+ (g_t_layer.stride_size * q + s) * g_t_input.channels * g_t_input.width
									+ (g_t_layer.stride_size * p + r) * g_t_input.channels
									+ c;
								size_t o_index = n * g_t_output.channels * g_t_output.width * g_t_output.height
									+ q * g_t_output.channels * g_t_output.width
									+ p * g_t_output.channels
									+ m;
								size_t f_index = s * g_t_layer.output_channels * g_t_layer.input_channels * g_t_layer.kernel_width
									+ r * g_t_layer.output_channels * g_t_layer.input_channels
									+ c * g_t_layer.output_channels
									+ m;
								float i = g_t_input.data[i_index];
								float f = g_t_layer.weights[f_index];
								g_t_output.data[o_index] += i * f;
							}
		//Bias for row q only.
		//Bug fix: the original applied the bias to the ENTIRE output inside
		//this per-row work loop, so the bias was added once per claimed row
		//and the concurrent += raced between threads.  Restricting the bias
		//to row q applies it exactly once per element, race-free.
		for (size_t n = 0; n < g_t_output.batches; n++)
			for (size_t p = 0; p < g_t_output.width; p++)
				for (size_t m = 0; m < g_t_output.channels; m++)
					*(featuremap_3d_addr_of(g_t_output, n, m, p, q)) += g_t_layer.bias[m];
	}//End while
	return NULL;
}//End process
/*
 * Pthread-parallel naive convolution driver.
 * Publishes the job through the g_t_* globals, spawns THREAD_SUPPORT worker
 * threads that claim output rows via a mutex-protected shared counter
 * (conv4d_convolve_threads_naive_helper), and joins them all.
 */
conv_ret conv4d_convolve_threads_naive(conv4d_layer layer, featuremap_3d input, featuremap_3d output)
{
	conv_ret ret;
	const clock_t t_begin = clock();

	//Workers accumulate into the output, so it must start zeroed.
	memset(output.data, 0, featuremap_3d_size(output) * sizeof(float));

	//Describe the job through the globals the workers read.
	iteration = 0;
	g_t_layer = layer;
	g_t_input = input;
	g_t_output = output;

	//Lazily create the shared work-counter mutex on first use.
	if (mutex == NULL) {
		mutex = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t));
		pthread_mutex_init(mutex, NULL);
	}

	//Spawn the workers, then wait for all of them to drain the work queue.
	for (int t = 0; t < THREAD_SUPPORT; t++) {
		pthread_create(&threads[t], NULL, conv4d_convolve_threads_naive_helper, NULL);
	}
	for (int t = 0; t < THREAD_SUPPORT; t++) {
		pthread_join(threads[t], NULL);
	}

	//NOTE: clock() reports CPU time summed over threads, not wall time.
	const clock_t t_end = clock();
	ret.time_elapsed = (double)(t_end - t_begin) / CLOCKS_PER_SEC;
	return ret;
}
/*
 * Optimized (cache-blocked) pthread convolution — NOT YET IMPLEMENTED.
 * Currently only zeroes the output and returns the (near-zero) elapsed time.
 */
conv_ret conv4d_convolve_threads_optimized(conv4d_layer layer, featuremap_3d input, featuremap_3d output, const size_t block_size)
{
	(void)block_size; //unused until the blocked implementation lands
	//Benchmarking setup
	conv_ret ret;
	clock_t start_t, end_t;
	//Bug fix: the original mixed time(&start_t) (seconds since the epoch)
	//with clock() (CPU ticks) and divided the difference by CLOCKS_PER_SEC,
	//yielding a meaningless elapsed time.  Use clock() consistently,
	//matching the sibling implementations.
	start_t = clock();
	//Reset memory
	memset(output.data, 0, featuremap_3d_size(output) * sizeof(float));
	//TODO stub: blocked multi-threaded convolution not implemented yet.
	//End benchmarking
	end_t = clock();
	ret.time_elapsed = (double)(end_t - start_t) / CLOCKS_PER_SEC;
	return ret;
}
#endif
#ifdef OMP_SUPPORT
#include <omp.h>
/*
OpenMP is a framework. Most issues stem from user
OpenMP is too easy. Sometimes ideas are easy and quick to implement.
But some are more expensive than others.
If you write dumb code, you will get dumb performance. Don't just blame OpenMP.
1. You must pay attention to single-thread performance.
It must perform reasonable well. If it doesn't, what will happen on 10 cores, 20 cores, ...?
Remember, scalability can mask poor performance.
A slow code tends to scale better, but is often still slower.
2. Do not parallelize what does NOT matter
Never tune your code without a profiling tool.
Blindly parallelizing code is never a good idea.
Q: What profiling tools do you use and why?
Don't share data unless you have to. Use private data as much as possible.
One "parallel for" is fine. Multiple back-to-back is EVIL
Think BIG and maximize the size of parallel regions.
What NOT to do
#pragma omp parallel for
{ <Code block 1> }
#pragma omp parallel for
{ <Code block 2> }
....
#pragma omp parallel for
{ <Code block n> }
Why?
Barriers are expensive: all threads wait for last one is finished (do nothing)
What to do (only one parallel region)
#pragma omp parallel
#pragma omp for
{ <Code block 1> }
#pragma omp for (nowait)
{ <Code block 2> }
....
#pragma omp for (nowait)
{ <Code block n> }
#pragma omp end parallel
Identify opportunities for nowait use.
(a powerful feature, but be aware of data races)
Use a schedule clause in case of load balancing issue
Use a profiling tool
Every barrier matters (same is true for locks and critical regions).
Use atomic operations where possible.
At the end of the day, EVERYTHING matters
---------Memory Access---------------
The nice thing about OpenMP is that memory access "just happens"
However, there are two things to watch out for:
1. Non-Uniform Memory Access (NUMA)
2. False sharing
They have nothing to do with OpenMP and are a characteristic of using a shared memory architecture.
They are important and affect performance.
What is NUMA?
Memory is physically distributed, but logically shared.
Shared data is transparently accessible to all threads.
You don't know where the data is and shouldn't matter because the system finds it for you
It does matter:
Each processor has its own memory. Processes run on single machines.
NUMA systems allow processors to access other processor's memory.
But there is an overhead to getting data from other processors.
As core and node count go up, but it increasingly matters.
This good news is that OpenMP has great support for NUMA
False Sharing
Occurs when multiple threads modify the same block of data (cache line) at the same time
Results in cache line moving through the system (an expensive operation)
Additional cost of cache coherence updates
Okay if it happens once in a while
Very bad if frequent
Example:
#pragma omp parallel shared(a)
{
int TID = omp_get_thread_num();
a[TID] = 0.0; //False sharing
}//End of parallel sharing
With each update of "a", the cache line moves to the cache of the thread executing the update!
Homework
1. Always make a profile before and after (using profiling tool)
2. Details sometimes make all the difference
3. In many cases, a performance mystery is explained by NUMA effects, false sharing, or both
How to approach NUMA
Tuning for NUMA is about keeping threads and their data close
In OpenMP, a thread may be moved to the data, rather than moving data to threads
Affinity constructs in OpenMP control where threads run
This is a powerful OpenMP feature, but it's my responsibility to get right
So where does data get allocated then?
Managed by OS. First Touch Placement Policy allocates a data page in the memory closest to the thread accessing the page for the first time.
So whoever uses the allocated first owns it.
Policy is default on Linux and other OSes
What if single thread initialized most or all data?
All data ends up in memory of a single node
Increases access times
Solution: Parallelize the data initialization part!
Example
#pragma omp parallel for schedule(static)
for(int i = 0; i<n; i++)
a[i]=0;
Matrix*Vector test code
#pragma omp parallel for default(none) shared(m,n,a,b,c) schedule(static)
for(int i = 0; i < m; i++){
double sum=0.0;
for(int j=0; j<n; j++){
sum += b[i][j]*c[j];
}
a[i]=sum
}
Anything wrong?
Runs in parallel
Data initialization is sequential.
More NUMA friendly NUMA implementation
Question:
How can I dynamically allocate large arrays if I need to be NUMA-aware? Should I use malloc() or would the entire array be placed in one core?
Depends on how to use malloc()
Malloc only requests data.
Make a large malloc outside the parallel region and DON'T touch it
Or initialize in parallel region if possible
Calloc initializes data to a value. It is evil, don't do it unless it's in the
In C++, how do you use std::vectors in parallel
Probably do it in parallel region if can be used in each core
Other scheduling algorithms
Allocate memory in random threads and hope for the best
*/
/*
 * OpenMP-parallel naive 4-D convolution.
 * Same arithmetic as conv4d_convolve_serial_naive; the output point space
 * (n, q, p) is distributed statically across threads.
 */
conv_ret conv4d_convolve_OpenMP_naive(conv4d_layer layer, featuremap_3d input, featuremap_3d output)
{
	//Benchmarking setup
	conv_ret ret;
	clock_t start_t, end_t;
	//NOTE(review): clock() sums CPU time across all OpenMP threads, so this
	//overstates wall-clock time for the parallel region — confirm intent
	//(omp_get_wtime() would measure wall time).
	start_t = clock();
	//Reset memory
	memset(output.data, 0, featuremap_3d_size(output) * sizeof(float));
	float* i_array = input.data;
	float* fw_array = layer.weights;
	float* fb_array = layer.bias;
	float* o_array = output.data;
	#pragma omp parallel default(none) firstprivate(i_array, fw_array, fb_array, o_array, layer, input, output)
	{
		//Iterators
		long n, q, p, s, r, c, m;
		//Bug fix: the original used collapse(7) nowait.  collapse(7) also
		//distributed the s/r/c reduction loops, but o_index depends only on
		//(n, q, p, m) — so two threads could accumulate into the same output
		//element concurrently (a data race).  With collapse(3) each thread
		//owns a disjoint set of (n, q, p) triples and runs the s/r/c/m loops
		//itself, so every output element is written by exactly one thread.
		//The nowait is also dropped: the implicit barrier is required before
		//the bias loop below, whose (static, over n) distribution differs
		//from the collapsed one and would otherwise race with the
		//still-running convolution.
		#pragma omp for schedule(static) collapse(3)
		//Begin convolution
		for (n = 0; n < output.batches; n++)
			for (q = 0; q < output.height; q++)
				for (p = 0; p < output.width; p++)
					for (s = 0; s < layer.kernel_height; s++)
						for (r = 0; r < layer.kernel_width; r++)
							for (c = 0; c < input.channels; c++)
								for (m = 0; m < output.channels; m++)
								{
									//Flattened channel-fastest offsets: input
									//(n, q*stride+s, p*stride+r, c), output (n, q, p, m),
									//weights (s, r, c, m).
									size_t i_index = n * input.channels * input.width * input.height
										+ (layer.stride_size * q + s) * input.channels * input.width
										+ (layer.stride_size * p + r) * input.channels
										+ c;
									size_t o_index = n * output.channels * output.width * output.height
										+ q * output.channels * output.width
										+ p * output.channels
										+ m;
									size_t f_index = s * layer.output_channels * layer.input_channels * layer.kernel_width
										+ r * layer.output_channels * layer.input_channels
										+ c * layer.output_channels
										+ m;
									o_array[o_index] += i_array[i_index] * fw_array[f_index];
								}
		#pragma omp for schedule(static)
		//Bias: distributed over n, so each thread owns whole batches and the
		//updates cannot collide.
		for (n = 0; n < output.batches; n++)
			for (q = 0; q < output.height; q++)
				for (p = 0; p < output.width; p++)
					for (m = 0; m < output.channels; m++){
						size_t o_index = n * output.channels * output.width * output.height
							+ q * output.channels * output.width
							+ p * output.channels
							+ m;
						o_array[o_index] += fb_array[m];
					}
	}//End parallel region
	//End benchmarking
	end_t = clock();
	ret.time_elapsed = (double)(end_t - start_t) / CLOCKS_PER_SEC;
	return ret;
}
/*
 * OpenMP cache-blocked 4-D convolution: tiles the batch (n), output-row (q)
 * and kernel-row (s) dimensions and work-shares the collapsed tile loops.
 */
conv_ret conv4d_convolve_OpenMP_optimized(conv4d_layer layer, featuremap_3d input, featuremap_3d output, const size_t block_size)
{
	//Benchmarking setup
	conv_ret ret;
	clock_t start_t, end_t;
	//NOTE(review): clock() sums CPU time over all OpenMP threads, so the
	//reported time overstates wall-clock time — confirm intent.
	start_t = clock();
	//Reset memory
	memset(output.data, 0, featuremap_3d_size(output) * sizeof(float));
	//Raw pointers copied firstprivate into the parallel region.
	//NOTE(review): the loop body below reads input.data / layer.weights /
	//output.data directly, so i_array/fw_array/fb_array/o_array are unused.
	float* i_array = input.data;
	float* fw_array = layer.weights;
	float* fb_array = layer.bias;
	float* o_array = output.data;
	#pragma omp parallel default(none) firstprivate(i_array, fw_array, fb_array, o_array, layer, input, output, block_size)
	{
		//Iterators
		long n0, q0, s0, p, r, c, m, n1, q1, s1;
		//Convolve
		//n, s, q receive the de-blocked indices computed in the tile loops.
		size_t n, s, q;
		//The outer seven loops are NOT work-shared: every thread executes
		//them redundantly so that all threads encounter the inner "omp for"
		//worksharing construct together.
		for (n0 = 0; n0 < output.batches; n0 += block_size)
		for (q0 = 0; q0 < output.height; q0 += block_size)
		for (s0 = 0; s0 < layer.kernel_height; s0+=block_size)
		for (p = 0; p < output.width; p++)
		for (r = 0; r < layer.kernel_width; r++)
		for (c = 0; c < input.channels; c++)
		for (m = 0; m < output.channels; m++)
		//Blocking over n, q, and s
		//NOTE(review): suspected data race — the collapsed (n1,q1,s1) space
		//is split across threads, but o_index does not depend on s1, so two
		//threads can hold the same (n1,q1) with different s1 and accumulate
		//into the same output element concurrently.  The nowait additionally
		//removes the barrier between successive encounters of this loop.
		//TODO confirm and fix (e.g. collapse over n1,q1 only, or privatize
		//the accumulation).
		#pragma omp for schedule(static) collapse(3) nowait
		for (n1 = 0; n1 < block_size; n1++){
		for (q1 = 0; q1 < block_size; q1++){
		for (s1 = 0; s1 < block_size; s1++){
			//Clip the final partial tile in each blocked dimension.
			if((q=q0+q1) >= output.height) continue;
			if((n=n0+n1) >= output.batches) continue;
			if((s=s0+s1) >= layer.kernel_height) continue;
			//Flattened channel-fastest offsets: input (n, q*stride+s,
			//p*stride+r, c), output (n, q, p, m), weights (s, r, c, m).
			size_t i_index = n * input.channels * input.width * input.height
				+ (layer.stride_size * q + s) * input.channels * input.width
				+ (layer.stride_size * p + r) * input.channels
				+ c;
			size_t o_index = n * output.channels * output.width * output.height
				+ q * output.channels * output.width
				+ p * output.channels
				+ m;
			size_t f_index = s * layer.output_channels * layer.input_channels * layer.kernel_width
				+ r * layer.output_channels * layer.input_channels
				+ c * layer.output_channels
				+ m;
			float i = input.data[i_index];
			float f = layer.weights[f_index];
			output.data[o_index] += i * f;
			//printf("%I32d %zu %zu %zu\n", iteration++, i_index, f_index, o_index);
		}
		}
		}
		//Bias: one per-channel addition for every output element.
		#pragma omp for schedule(static) collapse(4) nowait
		for (size_t n = 0; n < output.batches; n++)
		for (size_t q = 0; q < output.height; q++)
		for (size_t p = 0; p < output.width; p++)
		for (size_t m = 0; m < output.channels; m++)
			*(featuremap_3d_addr_of(output, n, m, p, q)) += layer.bias[m];
	}//End parallel region
	//End benchmarking
	end_t = clock();
	ret.time_elapsed = (double)(end_t - start_t) / CLOCKS_PER_SEC;
	return ret;
}
#endif |
par_lr_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#define MAX_C_CONNECTIONS 100
#define HAVE_COMMON_C 1
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildStdInterp
* Comment: The interpolatory weighting can be changed with the sep_weight
* variable. This can enable not separating negative and positive
* off diagonals in the weight formula.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int sep_weight,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
//HYPRE_BigInt *found;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa = 1.;
HYPRE_Real beta = 1.;
/* Loop variables */
// HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, j1, jj, kk, k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
{
P_offd_i[i] = jj_counter_offd;
}
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
/* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
/* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] >= 0)
{
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] >= 0)
{
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which is a modification to a, used in the standard
* interpolation routine. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
{
jj_begin_row_offd = jj_counter_offd;
}
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] >= 0)
{
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] >= 0)
{
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds();
}
cnt_c = 0;
cnt_f = jj_end_row - jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd - jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
{
ahat[indx] += A_diag_data[jj];
}
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj] / A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1] + 1; kk < A_diag_i[i1 + 1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
{
ahat[indx] -= A_diag_data[kk] * distribute;
}
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk] * distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk] * distribute;
}
}
if (num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1 + 1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if (num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
{
ahat_offd[indx] -= A_offd_data[kk] * distribute;
}
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk] * distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk] * distribute;
}
}
}
}
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
{
ahat_offd[indx] += A_offd_data[jj];
}
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj] / A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1] + 1; kk < A_ext_i[i1 + 1]; kk++)
{
big_k1 = A_ext_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
{
ahat[indx] -= A_ext_data[kk] * distribute;
}
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk] * distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk] * distribute;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
{
ahat_offd[indx] -= A_ext_data[kk] * distribute;
}
else if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk] * distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk] * distribute;
}
}
}
}
}
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if (sep_weight == 1)
{
for (jj = 0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if (num_procs > 1)
{
for (jj = 0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj = cnt_c + 1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj = cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C * diagonal != 0)
{
alfa = sum_neg / sum_neg_C / diagonal;
}
if (sum_pos_C * diagonal != 0)
{
beta = sum_pos / sum_pos_C / diagonal;
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
{
P_diag_data[jj] = -beta * ahat[j1];
}
else
{
P_diag_data[jj] = -alfa * ahat[j1];
}
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj = 0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
{
P_offd_data[jj] = -beta * ahat_offd[j1];
}
else
{
P_offd_data[jj] = -alfa * ahat_offd[j1];
}
ahat_offd[j1] = 0;
}
for (jj = 0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
else
{
for (jj = 0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if (num_procs > 1)
{
for (jj = 0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj = cnt_c + 1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj = cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C * diagonal != 0)
{
alfa = sum / sum_C / diagonal;
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa * ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj = 0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa * ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj = 0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
if (debug_flag == 4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag == 4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] == -3)
{
CF_marker[i] = -1;
}
}
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPIInterpHost
 * Comment: Host (CPU) implementation of extended+i (ext+i) interpolation.
 *--------------------------------------------------------------------------*/
/*
 * Builds the extended+i ("ext+i") interpolation operator P on the host.
 *
 * Two OpenMP-parallel passes over the fine grid:
 *   Pass 1 counts, per thread-local row range, the interpolation set C-hat
 *   of every F-point (strong C-neighbors plus C-points reachable through
 *   strong F-neighbors, on- and off-processor), producing thread-local
 *   P_diag_i / P_offd_i / fine_to_coarse which are then stitched together
 *   with an exclusive prefix sum across threads.
 *   Pass 2 fills the column indices and weights: weak connections are
 *   collapsed into the diagonal, strong F-connections are distributed to
 *   the common C-points (and to the diagonal itself — the "+i" part),
 *   and each row is finally scaled by -1/diagonal.
 *
 * Arguments:
 *   A               - fine-grid matrix (ParCSR)
 *   CF_marker       - C/F splitting: >= 0 coarse, -3 is a special F flag
 *                     that is reset to -1 before returning
 *   S               - strength matrix (pattern only; same row structure as A)
 *   num_cpts_global - this rank's coarse-point range [num_cpts_global[0],
 *                     num_cpts_global[1])
 *   num_functions   - number of functions (unknowns per node); with
 *                     dof_func, restricts collapsing to like functions
 *   dof_func        - function index of each fine dof (used when
 *                     num_functions > 1)
 *   debug_flag      - == 4 enables wall-clock timing printouts
 *   trunc_factor    - relative drop tolerance for truncating P (0 = off)
 *   max_elmts       - max nonzeros kept per row of P (0 = unlimited)
 *   P_ptr           - output: the interpolation matrix P (caller owns it)
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterpHost(hypre_ParCSRMatrix *A,
                                    HYPRE_Int *CF_marker,
                                    hypre_ParCSRMatrix *S,
                                    HYPRE_BigInt *num_cpts_global,
                                    HYPRE_Int num_functions,
                                    HYPRE_Int *dof_func,
                                    HYPRE_Int debug_flag,
                                    HYPRE_Real trunc_factor,
                                    HYPRE_Int max_elmts,
                                    hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
   /* timer is stopped/restarted around the communication and truncation
    * phases below; it is closed out just before returning */
   hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this rank's global row/column ownership range;
    * global indices in that range map to local diag columns */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + local_numrows;
   HYPRE_BigInt total_global_cpts, my_first_cpt;
   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   /*HYPRE_Int *col_map_offd_P = NULL;*/
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   /* P_marker/P_marker_offd are thread-private scratch (allocated inside
    * the parallel region); an entry >= jj_begin_row means "column already
    * in the current row of P" */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_BigInt *A_ext_j;
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;
   /* Sop holds, for off-processor F-points, their strong-connection rows
    * in global column numbers (negative encoding = off-diag column) */
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_BigInt *Sop_j;
   HYPRE_Int sgn = 1;
   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;
   /* Interpolation weight variables */
   HYPRE_Real sum, diagonal, distribute;
   /* sentinel written into P_marker for strong F-neighbors of the current
    * row; decremented per row so stale marks never match */
   HYPRE_Int strong_f_marker;
   /* Loop variables */
   /*HYPRE_Int index;*/
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt big_k1;
   /* Threading variables */
   HYPRE_Int my_thread_num, num_threads, start, stop;
   HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   /* per-thread running totals used to stitch the thread-local row
    * pointers and coarse counts into global prefix sums */
   HYPRE_Int * diag_offset;
   HYPRE_Int * fine_to_coarse_offset;
   HYPRE_Int * offd_offset;
   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Real wall_time;
   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   my_first_cpt = num_cpts_global[0];
   /* only the last rank knows the global coarse-point count; broadcast it */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /* fetches CF_marker/dof_func for off-proc columns plus the full rows
       * (A_ext, Sop) of off-proc points needed for distance-two coverage */
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }
   if (full_off_procNodes)
   {
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }
   /* This function is smart enough to check P_marker and P_marker_offd only,
    * and set them if they are not NULL. The other vectors are set regardless.*/
   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);
   /*-----------------------------------------------------------------------
    * Initialize threading variables
    *-----------------------------------------------------------------------*/
   max_num_threads[0] = hypre_NumThreads();
   diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_threads[0]; i++)
   {
      diag_offset[i] = 0;
      fine_to_coarse_offset[i] = 0;
      offd_offset[i] = 0;
   }
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker, big_k1)
#endif
   {
      /* Parallelize by computing only over each thread's range of rows.
       *
       * The first large for loop computes ~locally~ for each thread P_diag_i,
       * P_offd_i and fine_to_coarse. Then, the arrays are stitched together
       * For example the first phase would compute
       * P_diag_i = [0, 2, 4, 7, 2, 5, 6]
       * for two threads. P_diag_i[stop] points to the end of that
       * thread's data, but P_diag_i[start] points to the end of the
       * previous thread's row range. This is then stitched together at the
       * end to yield,
       * P_diag_i = [0, 2, 4, 7, 9, 14, 15].
       *
       * The second large for loop computes interpolation weights and is
       * relatively straight-forward to thread.
       */
      /* initialize thread-wise variables */
      strong_f_marker = -2;
      coarse_counter = 0;
      jj_counter = start_indexing;
      jj_counter_offd = start_indexing;
      if (n_fine)
      {
         /* thread-private marker over all fine rows; -1 means "not seen" */
         P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_fine; i++)
         { P_marker[i] = -1; }
      }
      if (full_off_procNodes)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
         for (i = 0; i < full_off_procNodes; i++)
         { P_marker_offd[i] = -1;}
      }
      /* this thread's row range */
      my_thread_num = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      start = (n_fine / num_threads) * my_thread_num;
      if (my_thread_num == num_threads - 1)
      { stop = n_fine; }
      else
      { stop = (n_fine / num_threads) * (my_thread_num + 1); }
      /* loop over rows */
      /* This loop counts the number of elements in P */
      /* is done by counting the elements in the index set C-hat */
      for (i = start; i < stop; i++)
      {
         P_diag_i[i] = jj_counter;
         if (num_procs > 1)
         {
            P_offd_i[i] = jj_counter_offd;
         }
         if (CF_marker[i] >= 0)
         {
            /* row in P corresponding to a coarse pt., will only require one element (1 on the diagonal). */
            jj_counter++;
            fine_to_coarse[i] = coarse_counter;
            coarse_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i, or C-points that strongly influence F-points
          * that strongly influence i.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  /* i1 is a C point */
                  /* P_marker < P_diag_i[i] means i1 not yet counted for row i */
                  if (P_marker[i1] < P_diag_i[i])
                  {
                     P_marker[i1] = jj_counter;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  /* i1 is a F point, loop through its strong neighbors */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < P_offd_i[i])
                           {
                              /* remember which off-proc columns P uses,
                               * for building the column map later */
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            /* Look at off diag strong connections of i */
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     if (P_marker_offd[i1] < P_offd_i[i])
                     {
                        tmp_CF_marker_offd[i1] = 1;
                        P_marker_offd[i1] = jj_counter_offd;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     /* F point; look at neighbors of i1. Sop contains global col
                      * numbers and entries that could be in S_diag or S_offd or
                      * neither. */
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* In S_diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           /* negative encoding: -big_k1 - 1 is the local
                            * off-diag column index */
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
      }
      /*-----------------------------------------------------------------------
       * End loop over fine grid.
       *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* publish this thread's local totals for the prefix-sum stitching */
      P_diag_i[stop] = jj_counter;
      P_offd_i[stop] = jj_counter_offd;
      fine_to_coarse_offset[my_thread_num] = coarse_counter;
      diag_offset[my_thread_num] = jj_counter;
      offd_offset[my_thread_num] = jj_counter_offd;
      /* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         /* Calculate the offset for P_diag_i and P_offd_i for each thread */
         for (i = 1; i < num_threads; i++)
         {
            diag_offset[i] = diag_offset[i - 1] + diag_offset[i];
            fine_to_coarse_offset[i] = fine_to_coarse_offset[i - 1] + fine_to_coarse_offset[i];
            offd_offset[i] = offd_offset[i - 1] + offd_offset[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      if (my_thread_num > 0)
      {
         /* update row pointer array with offset,
          * making sure to update the row stop index */
         for (i = start + 1; i <= stop; i++)
         {
            P_diag_i[i] += diag_offset[my_thread_num - 1];
            P_offd_i[i] += offd_offset[my_thread_num - 1];
         }
         /* update fine_to_coarse by offsetting with the offset
          * from the preceding thread */
         for (i = start; i < stop; i++)
         {
            if (fine_to_coarse[i] >= 0)
            { fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num - 1]; }
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         if (debug_flag == 4)
         {
            wall_time = time_getWallclockSeconds() - wall_time;
            hypre_printf("Proc = %d     determine structure    %f\n",
                         my_id, wall_time);
            fflush(NULL);
         }
         /*-----------------------------------------------------------------------
          * Allocate arrays.
          *-----------------------------------------------------------------------*/
         if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }
         /* after stitching, the last row-pointer entries are the true sizes */
         P_diag_size = P_diag_i[n_fine];
         P_offd_size = P_offd_i[n_fine];
         if (P_diag_size)
         {
            P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
            P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
         }
         if (P_offd_size)
         {
            P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
            P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
         }
      }
      /* Fine to coarse mapping */
      if (num_procs > 1 && my_thread_num == 0)
      {
         /* distribute global coarse indices to off-proc columns */
         hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                    full_off_procNodes, my_first_cpt,
                                    fine_to_coarse_offd);
      }
      /* reset this thread's private markers for the fill pass */
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < full_off_procNodes; i++)
      {
         P_marker_offd[i] = -1;
      }
      /*-----------------------------------------------------------------------
       * Loop over fine grid points.
       *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      for (i = start; i < stop; i++)
      {
         jj_begin_row = P_diag_i[i];
         jj_begin_row_offd = P_offd_i[i];
         jj_counter = jj_begin_row;
         jj_counter_offd = jj_begin_row_offd;
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else if (CF_marker[i] != -3)
         {
            /* new sentinel per row so marks from previous rows cannot match */
            strong_f_marker--;
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  if (P_marker[i1] < jj_begin_row)
                  {
                     /* P_marker records where i1's weight lives in P_diag_data */
                     P_marker[i1] = jj_counter;
                     P_diag_j[jj_counter] = fine_to_coarse[i1];
                     P_diag_data[jj_counter] = zero;
                     jj_counter++;
                  }
               }
               else if (CF_marker[i1] != -3)
               {
                  /* strong F-neighbor: mark it, then add its strong C-points */
                  P_marker[i1] = strong_f_marker;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              /* local off-diag index; remapped to a global
                               * column later by hypre_build_interp_colmap */
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     if (P_marker_offd[i1] < jj_begin_row_offd)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     P_marker_offd[i1] = strong_f_marker;
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        /* Find local col number */
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
            jj_end_row = jj_counter;
            jj_end_row_offd = jj_counter_offd;
            /* first entry of row i is assumed to be the diagonal a_ii */
            diagonal = A_diag_data[A_diag_i[i]];
            for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
            {
               /* i1 is a c-point and strongly influences i, accumulate
                * a_(i,i1) into interpolation weight */
               i1 = A_diag_j[jj];
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               else if (P_marker[i1] == strong_f_marker)
               {
                  /* strong F-neighbor: distribute a_(i,i1) over i1's strong
                   * C-connections (and i itself — the "+i" extension),
                   * restricted to entries of opposite sign to a_(i1,i1) */
                  sum = zero;
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
                  /* Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i. */
                  for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn * A_diag_data[jj1]) < 0)
                     {
                        sum += A_diag_data[jj1];
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd &&
                            (sgn * A_offd_data[jj1]) < 0)
                        {
                           sum += A_offd_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;
                     /* Loop over row of A for point i1 and do the distribution */
                     for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                           P_diag_data[P_marker[i2]] +=
                              distribute * A_diag_data[jj1];
                        if (i2 == i && (sgn * A_diag_data[jj1]) < 0)
                        {
                           /* the "+i" part: share going back to i folds
                            * into the diagonal */
                           diagonal += distribute * A_diag_data[jj1];
                        }
                     }
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd &&
                               (sgn * A_offd_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[i2]] +=
                                 distribute * A_offd_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     /* nothing to distribute to: lump into the diagonal */
                     diagonal += A_diag_data[jj];
                  }
               }
               /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
                * diagonal */
               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                  {
                     diagonal += A_diag_data[jj];
                  }
               }
            }
            if (num_procs > 1)
            {
               /* same treatment for off-processor neighbors of i, using the
                * fetched external rows A_ext (global column numbers) */
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }
                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                     }
                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                        {
                           big_k1 = A_ext_j[jj1];
                           if (big_k1 >= col_1 && big_k1 < col_n)
                           {
                              /* diag */
                              loc_col = (HYPRE_Int)(big_k1 - col_1);
                              if (P_marker[loc_col] >= jj_begin_row)
                                 P_diag_data[P_marker[loc_col]] += distribute *
                                                                   A_ext_data[jj1];
                              if (loc_col == i)
                              {
                                 diagonal += distribute * A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              loc_col = (HYPRE_Int)(-big_k1 - 1);
                              if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                                 P_offd_data[P_marker_offd[loc_col]] += distribute *
                                                                        A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     /* weak off-proc connection: collapse into diagonal */
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
               }
            }
            /* final scaling: w_ij = -(accumulated)/diagonal */
            if (diagonal)
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }
         strong_f_marker--;
      }
      /*-----------------------------------------------------------------------
       * End large for loop over nfine
       *-----------------------------------------------------------------------*/
      if (n_fine)
      {
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      }
      if (full_off_procNodes)
      {
         hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      }
   }
   /*-----------------------------------------------------------------------
    * End PAR_REGION
    *-----------------------------------------------------------------------*/
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     fill structure    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   /* hand the built arrays to P; P now owns them */
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
      /* truncation may have reallocated; refresh local views */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }
   hypre_MatvecCommPkgCreate(P);
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   /* restore the special F flag -3 back to ordinary F (-1) */
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3)
      {
         CF_marker[i] = -1;
      }
   }
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(diag_offset, HYPRE_MEMORY_HOST);
   hypre_TFree(offd_offset, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offset, HYPRE_MEMORY_HOST);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPICCInterp
 *
 * Builds the extended+i interpolation operator P for BoomerAMG.  An
 * F-point's interpolation set is extended through a strongly connected
 * F-neighbor only when that neighbor shares no common C-point with the
 * current row ("only use FF when there is no common c point").
 *
 * Two passes over the fine grid:
 *   Pass 1 counts the entries of each row of P (and of its off-processor
 *          part) and fills the fine-to-coarse mapping.
 *   Pass 2 fills in column indices and classical interpolation weights,
 *          distributing strong-F contributions over the common C set.
 *
 * Parameters:
 *   A               - fine-grid matrix (ParCSR)
 *   CF_marker       - C/F splitting (>= 0: C-point, -1: F-point, -3: F-point
 *                     excluded from interpolation).  Entries equal to -3 are
 *                     restored to -1 on exit.  C entries are temporarily set
 *                     to 2 during the common-C search and reset afterwards.
 *   S               - strength-of-connection matrix (same row structure as A)
 *   num_cpts_global - [first_cpt, last_cpt+1) coarse partitioning; entry 1 on
 *                     the last rank is the global number of coarse points
 *   num_functions   - number of functions (systems AMG)
 *   dof_func        - function index per dof (used when num_functions > 1)
 *   debug_flag      - unused here; kept for interface consistency with the
 *                     other BoomerAMG interpolation builders
 *   trunc_factor    - relative truncation threshold applied to P
 *   max_elmts       - maximum number of kept elements per row of P (0 = all)
 *   P_ptr           - output: the assembled interpolation matrix
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_BigInt *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Real trunc_factor,
                                  HYPRE_Int max_elmts,
                                  hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt total_global_cpts, my_first_cpt;
   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   HYPRE_Int common_c;
   /* Full row information for columns of A that are off diag */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_BigInt *A_ext_j;
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_BigInt *Sop_j;
   HYPRE_Int sgn = 1;
   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;
   /* Interpolation weight variables */
   HYPRE_Real sum, diagonal, distribute;
   HYPRE_Int strong_f_marker = -2;
   /* Loop variables */
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt big_k1;
   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   /* Last rank knows the global number of coarse points; broadcast it. */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }
   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }
   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
      {
         P_offd_i[i] = jj_counter_offd;
      }
      if (CF_marker[i] >= 0)
      {
         /* C-point: interpolates from itself only */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] > 0)
            {
               /* i1 is a C point; tag with 2 so the common-C search below
                * can recognize direct C neighbors of row i */
               CF_marker[i1] = 2;
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               /* search through offd to find all c neighbors */
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               {
                  /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search diag to find f neighbors and determine if common c point */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == -1)
            {
               /* i1 is a F point, loop through its strong neighbors */
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               {
                  /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        /* k1 is a c point; it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               {
                  /* No common c point, extend the interp set */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] > 0)
                     {
                        if (P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if (P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == -1)
               {
                  /* F point; look at neighbors of i1. Sop contains global col
                   * numbers and entries that could be in S_diag or S_offd or
                   * neither. */
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     /* Check if common c */
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        /* fixed: cast to HYPRE_Int (was HYPRE_BigInt), matching
                         * loc_col's declared type and the sibling branches */
                        loc_col = (HYPRE_Int)(-big_k1 - 1);
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        /* Check if common c */
                        big_k1 = Sop_j[kk];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* In S_diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Reset the temporary C-tags (2 -> 1) for the next row */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               {
                  /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 1;
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;
   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }
   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /* Fine to coarse mapping */
   if (num_procs > 1)
   {
      hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes, my_first_cpt,
                                 fine_to_coarse_offd);
   }
   for (i = 0; i < n_fine; i++)
   {
      P_marker[i] = -1;
   }
   for (i = 0; i < full_off_procNodes; i++)
   {
      P_marker_offd[i] = -1;
   }
   /*-----------------------------------------------------------------------
    * Second Pass: Loop over fine grid points and fill in P.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if (num_procs > 1)
      {
         jj_begin_row_offd = jj_counter_offd;
      }
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         /* fresh marker value per F-row distinguishes this row's strong
          * F-neighbors in P_marker */
         strong_f_marker--;
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search C points only */
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] > 0)
            {
               CF_marker[i1] = 2;
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               {
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search through F points */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == -1)
            {
               P_marker[i1] = strong_f_marker;
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               {
                  /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        /* k1 is a c point; it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               {
                  /* No common c point, extend the interp set */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == -1)
               {
                  /* F points that are off proc */
                  P_marker_offd[i1] = strong_f_marker;
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     /* Check if common c */
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = (HYPRE_Int)(-big_k1 - 1);
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        /* Find local col number */
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           /* fixed: explicit narrowing cast to HYPRE_Int for
                            * consistency with the other Sop branches */
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Reset the temporary C-tags (2 -> 1) for the next row */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               {
                  CF_marker_offd[i1] = 1;
               }
            }
         }
         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;
         /* Accumulate interpolation weights; the diagonal of A collects the
          * weak connections and the distributed strong-F contributions */
         diagonal = A_diag_data[A_diag_i[i]];
         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         {
            /* i1 is a c-point and strongly influences i, accumulate
             * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if (P_marker[i1] == strong_f_marker)
            {
               /* strong F-neighbor: distribute a_(i,i1) over its C-points
                * whose sign opposes i1's diagonal */
               sum = zero;
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
               for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn * A_diag_data[jj1]) < 0)
                  {
                     sum += A_diag_data[jj1];
                  }
               }
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd &&
                         (sgn * A_offd_data[jj1]) < 0)
                     {
                        sum += A_offd_data[jj1];
                     }
                  }
               }
               if (sum != 0)
               {
                  distribute = A_diag_data[jj] / sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                     {
                        P_diag_data[P_marker[i2]] +=
                           distribute * A_diag_data[jj1];
                     }
                     if (i2 == i && (sgn * A_diag_data[jj1]) < 0)
                     {
                        diagonal += distribute * A_diag_data[jj1];
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd &&
                            (sgn * A_offd_data[jj1]) < 0)
                        {
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute * A_offd_data[jj1];
                        }
                     }
                  }
               }
               else
               {
                  /* nothing to distribute over; lump into the diagonal */
                  diagonal += A_diag_data[jj];
               }
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
               {
                  diagonal += A_diag_data[jj];
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
            {
               i1 = A_offd_j[jj];
               if (P_marker_offd[i1] >= jj_begin_row_offd)
               {
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               }
               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  /* off-proc strong F-neighbor: same distribution using the
                   * external rows of A (A_ext, global column numbers) */
                  sum = zero;
                  sgn = 1;
                  for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                  {
                     big_k1 = A_ext_j[jj1];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                     else
                     {
                        loc_col = (HYPRE_Int)(-big_k1 - 1);
                        if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row)
                           {
                              P_diag_data[P_marker[loc_col]] += distribute *
                                                                A_ext_data[jj1];
                           }
                           if (loc_col == i)
                           {
                              diagonal += distribute * A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           loc_col = (HYPRE_Int)(-big_k1 - 1);
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                           {
                              P_offd_data[P_marker_offd[loc_col]] += distribute *
                                                                     A_ext_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
            }
         }
         /* Normalize: w_ij = -a_ij(accumulated) / diagonal */
         if (diagonal)
         {
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               P_diag_data[jj] /= -diagonal;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               P_offd_data[jj] /= -diagonal;
            }
         }
      }
      strong_f_marker--;
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }
   hypre_MatvecCommPkgCreate(P);
   /* Restore the excluded F-points to plain F-points */
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3) { CF_marker[i] = -1; }
   }
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFFInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int ccounter_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
HYPRE_Int sgn = 1;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;
HYPRE_Int *clist, ccounter;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
{
P_offd_i[i] = jj_counter_offd;
}
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
/* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
/* search through offd to find all c neighbors */
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
/* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] < 0)
{
/* i1 is a F point, loop through it's strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{
/* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
/* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] < 0)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
/* search through offd to find all c neighbors */
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == 2)
{
/* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
jj_begin_row_offd = 0;
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
{
jj_begin_row_offd = jj_counter_offd;
}
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search through F points */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{
/* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
/* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == -1)
{
/* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
/* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* In S_diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd] = loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
{
/* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
{
/* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly incluence i. */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute * A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn * A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute * A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
{
sum += A_ext_data[jj1];
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{
/* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute *
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute *
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i = 0; i < n_fine; i++)
if (CF_marker[i] == -3) { CF_marker[i] = -1; }
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildFF1Interp
 *
 * Builds the FF1 interpolation operator P: an F point i interpolates from
 * its strong C neighbors, and from the neighbors of a strong F neighbor i1
 * ONLY when i and i1 share no common C point — and then at most ONE C point
 * is added from i1's neighborhood ("FF1" = extend by a single C point).
 *
 * Parameters:
 *   A               - fine-grid operator (ParCSR)
 *   CF_marker       - C/F splitting on entry (>0 C, <0 F, -3 special F);
 *                     temporarily uses the value 2 to tag "common C" points,
 *                     restored before return
 *   S               - strength-of-connection matrix matching A's sparsity
 *   num_cpts_global - [first_cpt, last_cpt+1) global coarse-point range
 *   num_functions   - number of functions per node (systems AMG)
 *   dof_func        - function index per dof (used when num_functions > 1)
 *   debug_flag      - unused here; kept for interface compatibility
 *   trunc_factor    - drop tolerance for P truncation (0.0 = none)
 *   max_elmts       - max elements per row of P (0 = unlimited)
 *   P_ptr           - output: the interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * NOTE(review/fix): the sign variable `sgn` used to filter same-signed
 * connections in the weight distribution was previously set to -1 on the
 * first F neighbor with a negative diagonal and never reset, so it stayed
 * -1 for all later F neighbors regardless of their diagonal sign. It is now
 * re-initialized per F neighbor before the diagonal test.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix  *A,
                              HYPRE_Int           *CF_marker,
                              hypre_ParCSRMatrix  *S,
                              HYPRE_BigInt        *num_cpts_global,
                              HYPRE_Int            num_functions,
                              HYPRE_Int           *dof_func,
                              HYPRE_Int            debug_flag,
                              HYPRE_Real           trunc_factor,
                              HYPRE_Int            max_elmts,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int               my_id, num_procs;
   HYPRE_MemoryLocation    memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt     total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix    *P_diag;
   hypre_CSRMatrix    *P_offd;

   HYPRE_Real      *P_diag_data = NULL;
   HYPRE_Int       *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real      *P_offd_data = NULL;
   HYPRE_Int       *P_offd_i, *P_offd_j = NULL;

   HYPRE_Int        P_diag_size;
   HYPRE_Int        P_offd_size;
   HYPRE_Int       *P_marker = NULL;          /* per-row "already in P" tag, diag part */
   HYPRE_Int       *P_marker_offd = NULL;     /* same for the offd part */
   HYPRE_Int       *CF_marker_offd = NULL;    /* C/F splitting of off-proc nodes */
   HYPRE_Int       *tmp_CF_marker_offd = NULL; /* marks offd cols actually used in P */
   HYPRE_Int       *dof_func_offd = NULL;
   HYPRE_Int        common_c;                 /* 1 if i and F neighbor share a C point */

   /* Full row information for columns of A that are off diag */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data;
   HYPRE_Int       *A_ext_i;
   HYPRE_BigInt    *A_ext_j;

   HYPRE_Int       *fine_to_coarse = NULL;
   HYPRE_BigInt    *fine_to_coarse_offd = NULL;

   HYPRE_Int        loc_col;
   HYPRE_Int        full_off_procNodes;

   hypre_CSRMatrix *Sop;                      /* strength rows of off-proc nodes (global cols) */
   HYPRE_Int       *Sop_i;
   HYPRE_BigInt    *Sop_j;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int        jj_begin_row, jj_end_row;
   HYPRE_Int        jj_begin_row_offd = 0;
   HYPRE_Int        jj_end_row_offd = 0;
   HYPRE_Int        coarse_counter;

   /* Interpolation weight variables */
   HYPRE_Real       sum, diagonal, distribute;
   HYPRE_Int        strong_f_marker = -2;     /* decremented per row: unique tag for strong F pts */
   HYPRE_Int        sgn = 1;

   /* Loop variables */
   HYPRE_Int        start_indexing = 0;
   HYPRE_Int        i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt     big_k1;
   HYPRE_Int        found_c = 0;              /* FF1: stop after the first extension C point */

   /* Definitions */
   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   my_first_cpt = num_cpts_global[0];
   /* Last rank knows the global coarse count; broadcast it to everyone. */
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }

      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);

      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }

   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: count nonzeros of P per row.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
      {
         P_offd_i[i] = jj_counter_offd;
      }

      if (CF_marker[i] >= 0)
      {
         /* C point: interpolation is the identity, one entry. */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else
      {
         /* Direct strong C neighbors in the diag part; mark them 2 so the
          * F-neighbor scan below can recognize a "common C" point. */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] > 0)
            {
               /* i1 is a C point */
               CF_marker[i1] = 2;
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
         }

         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               /* search through offd to find all c neighbors */
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               {
                  /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
            }
         }

         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search diag to find f neighbors and determine if common c point */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] < 0)
            {
               /* i1 is a F point, loop through its strong neighbors */
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               {
                  /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        /* k1 is a c point; check if it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               {
                  /* No common c point: extend the interp set by ONE C point
                   * from i1's neighborhood (the FF1 variant). */
                  found_c = 0;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] > 0)
                     {
                        if (P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                           found_c = 1;
                           break;
                        }
                     }
                  }
                  if (num_procs > 1 && !found_c)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if (P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }

         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] < 0)
               {
                  /* F point; look at neighbors of i1. Sop contains global col
                   * numbers and entries that could be in S_diag or S_offd or
                   * neither. */
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     /* Check if common c */
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        /* negative entries of Sop_j encode offd columns as -(col)-1 */
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        /* Add at most one C point from i1's row */
                        big_k1 = Sop_j[kk];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* In S_diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                              break;
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }

         /* Restore the temporary "common C" tag (2 -> 1). */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               {
                  /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 1;
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }

   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /* Fine to coarse mapping */
   if (num_procs > 1)
   {
      hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes, my_first_cpt,
                                 fine_to_coarse_offd);
   }

   for (i = 0; i < n_fine; i++)
   {
      P_marker[i] = -1;
   }

   for (i = 0; i < full_off_procNodes; i++)
   {
      P_marker_offd[i] = -1;
   }

   /*-----------------------------------------------------------------------
    * Second Pass: Loop over fine grid points and fill in P.
    *-----------------------------------------------------------------------*/
   jj_begin_row_offd = 0;
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if (num_procs > 1)
      {
         jj_begin_row_offd = jj_counter_offd;
      }

      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         strong_f_marker--;
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] > 0)
            {
               CF_marker[i1] = 2;
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
         }

         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               {
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
         }

         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            /* Search through F points */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == -1)
            {
               P_marker[i1] = strong_f_marker;
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               {
                  /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        /* k1 is a c point; check if it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               {
                  /* No common c point, extend the interp set by one C point */
                  found_c = 0;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                           found_c = 1;
                           break;
                        }
                     }
                  }
                  if (num_procs > 1 && !found_c)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }

         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == -1)
               {
                  /* F points that are off proc */
                  P_marker_offd[i1] = strong_f_marker;
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     /* Check if common c */
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        big_k1 = Sop_j[kk];
                        /* Find local col number; add at most one C point */
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                              break;
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }

         /* Restore the temporary "common C" tag (2 -> 1). */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               {
                  CF_marker_offd[i1] = 1;
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         /* Classical-interpolation weights: accumulate A entries into P,
          * distributing strong-F connections over their C points. */
         diagonal = A_diag_data[A_diag_i[i]];
         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         {
            /* i1 is a c-point and strongly influences i, accumulate
             * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if (P_marker[i1] == strong_f_marker)
            {
               sum = zero;
               /* FIX: reset sgn for each strong F neighbor; previously it
                * stayed -1 once any earlier neighbor had a negative diagonal. */
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0) { sgn = -1; }
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i. */
               for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                  {
                     sum += A_diag_data[jj1];
                  }
               }
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd &&
                         (sgn * A_offd_data[jj1]) < 0)
                     {
                        sum += A_offd_data[jj1];
                     }
                  }
               }
               if (sum != 0)
               {
                  distribute = A_diag_data[jj] / sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                     {
                        P_diag_data[P_marker[i2]] +=
                           distribute * A_diag_data[jj1];
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd &&
                            (sgn * A_offd_data[jj1]) < 0)
                        {
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute * A_offd_data[jj1];
                        }
                     }
                  }
               }
               else
               {
                  /* nothing to distribute to: lump into the diagonal */
                  diagonal += A_diag_data[jj];
               }
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
               {
                  diagonal += A_diag_data[jj];
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
            {
               i1 = A_offd_j[jj];
               if (P_marker_offd[i1] >= jj_begin_row_offd)
               {
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               }
               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  /* distribute via the full external row of A for i1 */
                  sum = zero;
                  for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                  {
                     big_k1 = A_ext_j[jj1];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] >= jj_begin_row)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row)
                           {
                              P_diag_data[P_marker[loc_col]] += distribute *
                                                                A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                           {
                              P_offd_data[P_marker_offd[loc_col]] += distribute *
                                                                     A_ext_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
            }
         }

         /* Scale the row by -1/diagonal (skip if diagonal happens to be 0). */
         if (diagonal)
         {
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               P_diag_data[jj] /= -diagonal;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               P_offd_data[jj] /= -diagonal;
            }
         }
      }
      strong_f_marker--;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;

   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate; refresh local views. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map; col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Restore the -3 tag used for special F points. */
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3) { CF_marker[i] = -1; }
   }

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterpHost
 *
 * Host (CPU) construction of the extended ("Ext") interpolation operator P
 * for BoomerAMG.  F-points interpolate from the C-points that strongly
 * influence them, and from C-points that strongly influence intermediate
 * strong F-neighbors (distance-two interpolation).
 *
 * Parameters:
 *   A               - fine-grid matrix (ParCSR)
 *   CF_marker       - C/F splitting per local row (>= 0: C-point,
 *                     -1: F-point, -3: special F-point excluded from
 *                     interpolation; reset to -1 on exit)
 *   S               - strength-of-connection matrix (same row structure
 *                     domain as A; no data array is used)
 *   num_cpts_global - [first own C-point, total] global C-point info
 *   num_functions   - number of functions for systems AMG (dof_func used
 *                     only when > 1)
 *   dof_func        - function index per dof (may be NULL when
 *                     num_functions == 1)
 *   debug_flag      - 4 enables wall-clock timing printouts
 *   trunc_factor    - relative threshold for truncating small P entries
 *   max_elmts       - maximum kept entries per row of P (0 = no limit)
 *   P_ptr           - output: the interpolation matrix
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterpHost(hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_BigInt *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Real trunc_factor,
                                  HYPRE_Int max_elmts,
                                  hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;

   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
     HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this rank's range of global row/column indices */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;

   /*HYPRE_Int *col_map_offd_P = NULL;*/
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;

   /* Work arrays marking which C-points already appear in the current row */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_BigInt *A_ext_j;

   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;

   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;

   /* Strong connections of off-processor rows (global column numbers) */
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_BigInt *Sop_j;

   HYPRE_Int sgn = 1;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;

   /* Interpolation weight variables */
   HYPRE_Real sum, diagonal, distribute;
   /* Rows tagged with the current strong_f_marker are strong F-neighbors of
    * the row being built; the marker is decremented per row so stale tags
    * from earlier rows can never collide. */
   HYPRE_Int strong_f_marker = -2;

   /* Loop variables */
   /*HYPRE_Int index;*/
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt big_k1;

   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Real wall_time;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   my_first_cpt = num_cpts_global[0];
   /* Only the last rank knows the global C-point count; broadcast it. */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         /* NOTE(review): adds the current time with no matching "-=" start
          * visible in this function -- presumably the timer is started inside
          * hypre_exchange_interp_data; confirm before changing. */
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }

      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);

      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }

   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
      {
         P_offd_i[i] = jj_counter_offd;
      }

      if (CF_marker[i] >= 0)
      {
         /* C-point: interpolates only from itself */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }

      /*--------------------------------------------------------------------
       *  If i is an F-point, interpolation is from the C-points that
       *  strongly influence i, or C-points that strongly influence F-points
       *  that strongly influence i.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] >= 0)
            {
               /* i1 is a C point */
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            {
               /* i1 is a F point, loop through its strong neighbors */
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if (P_marker[k1] < P_diag_i[i])
                     {
                        P_marker[k1] = jj_counter;
                        jj_counter++;
                     }
                  }
               }
               if (num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if (P_marker_offd[k1] < P_offd_i[i])
                        {
                           tmp_CF_marker_offd[k1] = 1;
                           P_marker_offd[k1] = jj_counter_offd;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] >= 0)
               {
                  if (P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  /* F point; look at neighbors of i1. Sop contains global col
                   * numbers and entries that could be in S_diag or S_offd or
                   * neither. */
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] < P_diag_i[i])
                        {
                           P_marker[loc_col] = jj_counter;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        /* Off-diag entries were remapped to -loc_col-1 by the
                         * interp-data exchange; undo that encoding here. */
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] < P_offd_i[i])
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           tmp_CF_marker_offd[loc_col] = 1;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
      }
   }

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     determine structure    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/
   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds();
   }

   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }

   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /* Fine to coarse mapping */
   if (num_procs > 1)
   {
      hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes, my_first_cpt,
                                 fine_to_coarse_offd);
   }

   for (i = 0; i < n_fine; i++)
   {
      P_marker[i] = -1;
   }

   for (i = 0; i < full_off_procNodes; i++)
   {
      P_marker_offd[i] = -1;
   }

   /*-----------------------------------------------------------------------
    *  Second pass: loop over fine grid points and compute the weights.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      jj_begin_row_offd = jj_counter_offd;

      /*--------------------------------------------------------------------
       *  If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }

      /*--------------------------------------------------------------------
       *  If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         strong_f_marker--;
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         {
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] >= 0)
            {
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            {
               /* Tag strong F-neighbor and add its strong C-neighbors */
               P_marker[i1] = strong_f_marker;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if (P_marker[k1] < jj_begin_row)
                     {
                        P_marker[k1] = jj_counter;
                        P_diag_j[jj_counter] = fine_to_coarse[k1];
                        P_diag_data[jj_counter] = zero;
                        jj_counter++;
                     }
                  }
               }
               if (num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if (P_marker_offd[k1] < jj_begin_row_offd)
                        {
                           P_marker_offd[k1] = jj_counter_offd;
                           P_offd_j[jj_counter_offd] = k1;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }

         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] >= 0)
               {
                  if (P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  P_marker_offd[i1] = strong_f_marker;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  {
                     big_k1 = Sop_j[kk];
                     /* Find local col number */
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] < jj_begin_row)
                        {
                           P_marker[loc_col] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] < jj_begin_row_offd)
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           P_offd_j[jj_counter_offd] = loc_col;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         diagonal = A_diag_data[A_diag_i[i]];

         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         {
            /* i1 is a c-point and strongly influences i, accumulate
             * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if (P_marker[i1] == strong_f_marker)
            {
               /* Strong F-neighbor: distribute a_(i,i1) to the C-points that
                * strongly influence i, weighted by the (sign-filtered)
                * connections of i1 to those C-points. */
               sum = zero;
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0)
               {
                  sgn = -1;
               }
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i. */
               for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if ((P_marker[i2] >= jj_begin_row) && (sgn * A_diag_data[jj1]) < 0)
                  {
                     sum += A_diag_data[jj1];
                  }
               }
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn * A_offd_data[jj1]) < 0)
                     {
                        sum += A_offd_data[jj1];
                     }
                  }
               }
               if (sum != 0)
               {
                  distribute = A_diag_data[jj] / sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for (jj1 = A_diag_i[i1] + 1; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                     {
                        P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1];
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn * A_offd_data[jj1]) < 0)
                        {
                           P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1];
                        }
                     }
                  }
               }
               else
               {
                  /* No eligible C-connections: lump a_(i,i1) into diagonal */
                  diagonal += A_diag_data[jj];
               }
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
               {
                  diagonal += A_diag_data[jj];
               }
            }
         }

         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
            {
               i1 = A_offd_j[jj];
               if (P_marker_offd[i1] >= jj_begin_row_offd)
               {
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               }
               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  /* Off-processor strong F-neighbor: use its full row from
                   * A_ext (global column numbers) for the distribution. */
                  sum = zero;
                  for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                  {
                     big_k1 = A_ext_j[jj1];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        /* diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] >= jj_begin_row)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1 + 1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        {
                           /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row)
                           {
                              P_diag_data[P_marker[loc_col]] += distribute * A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                           {
                              P_offd_data[P_marker_offd[loc_col]] += distribute * A_ext_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
            }
         }

         /* Scale the row: w_ij = -(accumulated weight) / diagonal */
         if (diagonal)
         {
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               P_diag_data[jj] /= -diagonal;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               P_offd_data[jj] /= -diagonal;
            }
         }
      }
      strong_f_marker--;
   }

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;

   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation reallocates; refresh all local views */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Restore the special -3 F-points to ordinary F-points */
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3)
      {
         CF_marker[i] = -1;
      }
   }

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }

      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterp
 *
 * Dispatch wrapper for the extended interpolation builder: runs the device
 * implementation when A's data lives on the GPU (CUDA/HIP builds only),
 * otherwise falls back to the host implementation.  Arguments are forwarded
 * unchanged; returns the callee's error code.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                              hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtInterp");
   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGBuildExtInterpDevice(A, CF_marker, S, num_cpts_global,
                                                 num_functions, dof_func,
                                                 debug_flag, trunc_factor, max_elmts, P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildExtInterpHost(A, CF_marker, S, num_cpts_global,
                                               num_functions, dof_func,
                                               debug_flag, trunc_factor, max_elmts, P_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPIInterp
 *
 * Dispatch wrapper for the extended+i (Ext+i) interpolation builder: runs
 * the device implementation when A's data lives on the GPU (CUDA/HIP builds
 * only), otherwise the host implementation.  Arguments are forwarded
 * unchanged; returns the callee's error code.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A,
                                HYPRE_Int *CF_marker,
                                hypre_ParCSRMatrix *S,
                                HYPRE_BigInt *num_cpts_global,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                HYPRE_Int debug_flag,
                                HYPRE_Real trunc_factor,
                                HYPRE_Int max_elmts,
                                hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtPIInterp");
   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global,
                                                   num_functions, dof_func,
                                                   debug_flag, trunc_factor, max_elmts, P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildExtPIInterpHost(A, CF_marker, S, num_cpts_global,
                                                 num_functions, dof_func,
                                                 debug_flag, trunc_factor, max_elmts, P_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
|
arraytools.h | /** \file arraytools.h
\brief Contains the array_link class and related classes.
This file contains methods and classes to work with (orthogonal) arrays.
Author: Pieter Eendebak <pieter.eendebak@gmail.com>
Copyright: See LICENSE.txt file that comes with this distribution
*/
#pragma once
#ifdef WIN32
#define _CRT_SECURE_NO_DEPRECATE
#pragma warning(disable : 4996)
#pragma warning(disable : 4018)
#pragma warning(disable : 4244)
#endif
#ifdef WIN32
#ifdef FULLPACKAGE
#include "msstdint.h"
#endif
#else
#ifdef _WIN32 // || __CYGWIN__
// No visual studio!
#ifdef FULLPACKAGE
#ifndef int32_t
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
#endif
#ifndef uint64_t
typedef(unsigned __int64) uint64_t;
#endif
#endif
#else
// assume zlib is present on unix
#ifdef NOZLIB
#else
#ifdef FULLPACKAGE
#ifndef USEZLIB
#define USEZLIB 1
#endif
#endif
#endif
#endif
#endif
#ifdef FULLPACKAGE
#include <iostream>
#endif
#include <assert.h>
#include <deque>
#include <fstream>
#include <iomanip>
#include <ostream>
#include <sstream>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <map>
#include <stdexcept>
#include <Eigen/Core>
#include "printfheader.h"
void throw_runtime_exception (const std::string exception_message); // forward declaration to throw_runtime_exception in tools.cpp
// float types used for Eigen calculations
typedef Eigen::MatrixXd MatrixFloat;
typedef Eigen::ArrayXd ArrayFloat;
typedef Eigen::VectorXd VectorFloat;
typedef double eigenFloat;
/** Print information about an Eigen matrix
*
* \param m Matrix about which to print information
* \param str String to prepend in output
* \param verbose Verbosity level
*/
void eigenInfo (const MatrixFloat m, const char *str = "eigen", int verbose = 1);
/** Print Eigen matrix to stdout */
void print_eigen_matrix(const MatrixFloat matrix);
// helper function for Python interface
void eigen2numpyHelper (double *pymat1, int n, const MatrixFloat &m);
#ifdef USEZLIB
#include <zlib.h>
#endif
#include "mathtools.h"
#include "oaoptions.h"
#ifdef FULLPACKAGE
#include "bitarray/bit_array.h"
#include "md5.h"
#endif
extern "C" {}
/// data type for elements of orthogonal arrays
typedef short int array_t;
/// constant version of array_t
typedef const short int carray_t;
/* change definition below together with array_t !!!! */
#define MPI_ARRAY_T MPI_SHORT
/*other options for MPI_ARRAY_T are: char: MPI_CHAR, short: MPI_SHORT, int: MPI_INT, long: MPI_LONG */
typedef short int rowindex_t; /** type used for row indexing */
typedef int colindex_t; /** type used for column indexing */
typedef const int const_colindex_t; /** constant version of type used for column indexing */
/// pointer to array
typedef array_t *array_p;
/// pointer to constant array
typedef carray_t *carray_p;
typedef rowindex_t *rowperm_t; /** type of row permutation */
typedef colindex_t *colperm_t; /** type of column permutation */
typedef array_t *levelperm_t; /** type of level permutation */
// used to calculate the value (index) of values in a column combination
// this index is used in the strength calculations
// maximum value if of order max(s)*t
typedef int vindex_t; /* value index type */
/// return size in bytes of array_t type
int sizeof_array_t ();
/// return size in bytes of double type
int sizeof_double ();
/** Possible values for the J-characteristics of a 2-level design
 *
 * For a design with N runs and strength t the possible absolute J-values are
 * N, N - 2^(t+1), N - 2*2^(t+1), ..., down to the last value >= N mod 2^(t+1).
 *
 * \param N Number of runs (assumed positive)
 * \param strength Strength t of the design (assumed non-negative and small
 *                 enough that 2^(t+1) fits in an int)
 * \return Vector with the possible J-values in decreasing order
 */
inline std::vector< int > possible_F_values (int N, int strength) {
    // Step size 2^(strength+1), computed in exact integer arithmetic.  The
    // previous pow()/floor() on doubles risked rounding artifacts (e.g. a
    // pow result of 7.999... truncating to the wrong step).
    int x = 1 << (strength + 1);
    int nn = N / x + 1;
    std::vector< int > Fv (nn);
    for (int i = 0; i < nn; i++) {
        Fv[i] = N - x * i;
    }
    return Fv;
}
/// return true if the specified file exists
bool file_exists (const std::string filename);
/// return true if the specified file exists
bool file_exists (const char *filename);
/// return true if the specified oa file exists
bool oa_file_exists (const char *filename);
/// return true if the specified oa file exists
bool oa_file_exists (const std::string filename);
/// Ordering schemes used when comparing/sorting arrays
enum ordering_t {
    /// lexicographically minimal by columns ordering
    ORDER_LEX,
    /// J5 based ordering
    ORDER_J5
};
struct array_link;
/** @brief Specifies a class of arrays
 *
 * The specification includes the number of rows, number of columns, factor levels and strength.
 */
struct arraydata_t {
    /// number of runs (rows) of the design
    rowindex_t N;
    /// total number of columns (factors) in the design
    colindex_t ncols;
    /// strength of the design
    colindex_t strength;
    /// pointer to factor levels of the array (owned by this structure)
    array_t *s;
    /// Ordering used for arrays
    ordering_t order;

    /* derived data */
    /// number of groups of columns with the same number of levels
    colindex_t ncolgroups;
    /// specifies for each column the index of the column group
    colindex_t *colgroupindex;
    /// specifies for each column the size of the column group
    colindex_t *colgroupsize;
    /// index of the array
    int oaindex;

  public:
    /** Specifies a class of orthogonal arrays
     *
     * The specification includes the number of rows, number of columns, factor levels and strength.
     *
     * An orthogonal array of strength t, N runs, k factors (columns) and factor levels s[i] is an N times k array with
     * symbols 0, 1, ..., s[i]-1 in column i such that for every t columns every t-tuple of elements occurs equally often.
     */
    arraydata_t();
    /**
     * @copydoc arraydata_t::arraydata_t()
     *
     * \param s Factor levels
     * \param N Number of rows
     * \param strength Strength for class
     * \param ncols Number of columns for the class
     */
    arraydata_t (array_t s, rowindex_t N, colindex_t strength, colindex_t ncols);
    /**
     * @copydoc arraydata_t::arraydata_t()
     *
     * \param s Factor levels
     * \param N Number of rows
     * \param strength Strength for class
     * \param ncols Number of columns for the class
     */
    arraydata_t (const std::vector< int > s, rowindex_t N, colindex_t strength, colindex_t ncols);
    /// @copydoc arraydata_t::arraydata_t()
    arraydata_t (const array_t *s_, rowindex_t N, colindex_t strength, colindex_t ncols);
    /// @copydoc arraydata_t::arraydata_t()
    arraydata_t (const arraydata_t &adp);
    /// @copydoc arraydata_t::arraydata_t()
    arraydata_t (const arraydata_t *adp, colindex_t newncols);

    ~arraydata_t ();

    arraydata_t& operator= (const arraydata_t &ad2);
    int operator== (const arraydata_t &ad2);

    /// return true if the class represents mixed-level arrays
    bool ismixed () const;

    /// return true if the class represents a 2-level array
    bool is2level () const;

    /// return random array from the class. this operation is only valid for strength 0 or 1
    array_link randomarray (int strength = 0, int ncols = -1) const;

    /** @brief Write file with specification of orthogonal array class
     *
     * @param filename Filename to write to
     */
    void writeConfigFile (const char *filename) const;

    /// return string with class representation
    std::string idstr () const;
    /// return string with class representation. series of levels is expanded
    std::string idstrseriesfull () const;
    /// return string with class representation
    std::string fullidstr (int series = 0) const;
    /// return latex string describing the class
    std::string latexstr (int cmd = 0, int series = 0) const;

  public:
    /// return a copy of this class restricted to the first k columns
    arraydata_t reduceColumns (int k) {
        arraydata_t adata (this, k);
        return adata;
    }
    /// Return string used for displaying the class
    std::string showstr () const;
    void show (int verbose = 1) const;
    /// Calculate derived data such as the index and column groups from a design
    void complete_arraydata ();
    /// check whether the LMC calculation will overflow
    void lmc_overflow_check () const;
    // complete arraydata but split the column groups at the last column
    void complete_arraydata_fixlast ();
    // complete arraydata but split the column groups at ns
    void complete_arraydata_splitn (int ns);
    // set column groups at positions given by argument vector
    void set_colgroups (const std::vector< int > splits);
    /// set column group equal to that of a symmetry group
    void set_colgroups (const symmetry_group &sg);
    /// return sizes of the column groups
    std::vector<int> get_column_groups_sizes() const;
    /// show column groups in the array class
    void show_colgroups () const;
    /// calculate the index of the orthogonal arrays in this class
    void calculate_oa_index (colindex_t strength);
    /// return the root array for the class
    array_link create_root (int n_columns = -1, int fill_value = 0) const;
    /// return the factor level for the specified column; return -1 if the column index is invalid
    int getfactorlevel(int idx) const;
    /// return factor levels
    std::vector< int > getS () const {
        // deprecated accessor kept for backward compatibility; warns and
        // returns a copy of the factor-level array
        myprintf("getS(): deprecated method: use factor_levels instead\n");
        std::vector< int > s (this->ncols);
        for (int i = 0; i < this->ncols; i++) {
            s[i] = this->s[i];
        }
        return s;
    }
    /// return factor levels
    std::vector< int > factor_levels () const;
    /// return factor levels for the column groups
    std::vector< int > factor_levels_column_groups() const;

    /**
     * @brief Reset strength of arraydata
     * @param strength The strength to reset the structure to
     */
    void reset_strength(colindex_t strength);
    /// Return index of the column group for a column
    colindex_t get_col_group(const colindex_t col) const;

  public:
    /// Return True if the factor levels are sorted from large to small
    bool is_factor_levels_sorted() const;
};
/// Read array configuration from file
arraydata_t *readConfigFile (const char *file);
/** Copy the contents of one array into another, pre-allocated, array.
 *
 * \param src Source data
 * \param dst Destination buffer; must hold at least nrows*ncols elements
 * \param nrows Number of rows
 * \param ncols Number of columns
 */
inline void copy_array (const array_t *src, array_t *const dst, const int nrows, const int ncols) {
    const size_t nelements = (size_t)nrows * (size_t)ncols;
    memcpy (dst, src, nelements * sizeof (array_t));
}
/**
 * @brief Delete an array
 *
 * Releases memory allocated with create_array.  Passing NULL is safe
 * (free(NULL) is a no-op).
 *
 * @param array Pointer to the array to release
 * @return Always 0
 */
inline int destroy_array (array_t *array) {
    free (array);
    return 0;
}
/**
 * @brief Create an array
 *
 * Allocates an uninitialized buffer of nrows*ncols elements; throws a
 * runtime exception if the allocation fails.  The caller owns the result
 * and must release it with destroy_array.
 *
 * @param nrows Number of rows
 * @param ncols Number of columns
 * @return Pointer to the newly allocated array
 */
static inline array_t *create_array (const int nrows, const int ncols) {
    /* multiply in size_t: the previous int*int product could overflow
     * (undefined behavior) before being converted for malloc */
    array_t *array = (array_t *)malloc ((size_t)nrows * (size_t)ncols * sizeof (array_t));

    if (array == NULL) {
        throw_runtime_exception(printfstring("create_array: problem with malloc of size %dx%d", nrows, ncols));
    }
    return array;
}
/**
 * @brief Create an array from an arraydata_t structure
 *
 * Convenience overload: sizes the allocation from the class definition
 * (N rows, ncols columns).  Caller owns the result (see destroy_array).
 */
inline array_t *create_array (const arraydata_t *ad) { return create_array (ad->N, ad->ncols); }
/** Make a deep copy of an array
 *
 * \param array Source data to duplicate
 * \param nrows Number of rows
 * \param ncols Number of columns
 * \return Newly allocated copy; caller owns it (see destroy_array)
 */
inline array_t *clone_array (const array_t *const array, const rowindex_t nrows, const colindex_t ncols) {
    array_t *duplicate = create_array (nrows, ncols);
    copy_array (array, duplicate, nrows, ncols);
    return duplicate;
}
/** \brief Class representing an integer-valued array (orthogonal array or design matrix)
 */
struct array_link {
/// Number of rows in array
rowindex_t n_rows;
/// Number of columns in array
colindex_t n_columns;
/// Index number (used to keep track of arrays in lists)
int index;
/// Pointer to array data (stored in column-major order; see atfast)
array_t *array;
/// index value used when no index has been assigned
static const int INDEX_NONE = 0;
/// index value used to signal an error
static const int INDEX_ERROR = -1;
/// default index value for newly created arrays
static const int INDEX_DEFAULT = 0;
/** Default constructor: create an empty array_link object
 *
 */
array_link ();
/** @copydoc array_link::array_link()
 *
 * The array is initialized with zeros.
 *
 * \param nrows Number of rows
 * \param ncols Number of columns
 * \param index Number to keep track of lists of designs
 */
array_link (rowindex_t nrows, colindex_t ncols, int index);
/** @copydoc array_link::array_link()
 *
 * Initialize with data from a pointer.
 */
array_link (rowindex_t nrows, colindex_t ncols, int index, carray_t *data);
/** @copydoc array_link::array_link()
 *
 * Initialize with data from another array_link object.
 */
array_link (const array_link &);
/** @copydoc array_link::array_link()
 *
 * Initialize with data from an Eigen matrix.
 */
array_link (Eigen::MatrixXd &eigen_matrix);
/** @copydoc array_link::array_link()
 *
 * The array is initialized by permuting the columns of another array
 *
 * \param array Source to copy from
 * \param column_permutation The permutation to apply
 */
array_link(const array_link &array, const std::vector< int > &column_permutation);
/// @copydoc array_link::array_link()
array_link(const array_t *array, rowindex_t nrows, colindex_t ncols, int index = 0);
/// @copydoc array_link::array_link()
array_link(const array_t *array, rowindex_t nrows, colindex_t ncolsorig, colindex_t ncols, int index);
/** @copydoc array_link::array_link()
 *
 * The array is initialized by copying the values from a vector.
 */
array_link(const std::vector< int > &values, rowindex_t nrows, colindex_t ncols, int index = 0);
~array_link ();
#ifdef SWIGCODE
/// Create array_link from a raw memory buffer
array_link (long *pymatinput, int nrows, int ncols);
#endif
/// return a copy of this array
array_link clone () const;
public:
/// print an array to output stream
friend std::ostream &operator<< (std::ostream &, const array_link &A);
/// print array to stdout
void showarray () const;
/// print array to string
std::string showarrayString () const;
/// print array to stdout in compact format (no whitespace between elements)
void showarraycompact () const;
/// print array properties to stdout
void showproperties () const;
/// return true if the array is a 2-level array (e.g. only contains values 0 and 1)
bool is2level () const;
/// return true if the array is a mixed-level array
bool is_mixed_level() const;
/// return true if the array is an array with values in 0, 1, ..., for each column
bool is_orthogonal_array() const;
/** return true if the array is a +1, 0, -1 valued array
 */
bool is_conference () const;
/// return true if the array is a +1, 0, -1 valued array, with specified number of zeros in each column
bool is_conference (int number_of_zeros) const;
/// return true if the array is symmetric
bool isSymmetric () const;
/// make the array symmetric by copying the upper-right to the lower-left
void makeSymmetric ();
/// return array with selected column removed
array_link deleteColumn (int index) const;
/// return array with the first nrows rows selected
array_link selectFirstRows (int nrows) const;
/// return array with the first ncolumns columns selected
array_link selectFirstColumns (int ncolumns) const;
/// return array with the last ncolumns columns selected
array_link selectLastColumns (int ncolumns) const;
/// select columns from an array
array_link selectColumns (const std::vector< int > c) const;
/// select single column from an array
array_link selectColumns (int c) const;
/// set a column of the array to the given vector (no bounds checking)
void setColumn (int c, const std::vector< int > v) {
std::copy (v.begin (), v.end (), this->array + c * this->n_rows);
}
/// set a column of the array to the given vector (no bounds checking)
void setColumn (int c, const std::vector< signed char > v) {
std::copy (v.begin (), v.end (), this->array + c * this->n_rows);
}
/// return transposed array
array_link transposed () const;
/// calculate D-efficiency
double Defficiency () const;
/// calculate main effect robustness (or Ds-optimality)
double DsEfficiency (int verbose = 0) const;
/// calculate D-efficiency, calculate main effect robustness (or Ds-optimality) and D1-efficiency for an orthogonal array
std::vector< double > Defficiencies (int verbose = 0, int addDs0 = 0) const;
/** Calculate average variance inflation factor (VIF)
 *
 * If the VIF is infinite, the value 0 is returned. The VIF takes values between 1 and infinity.
 */
double VIFefficiency () const;
/// calculate A-efficiency
double Aefficiency () const;
/// calculate E-efficiency
double Eefficiency () const;
/** Calculate F-values of a 2-level matrix.
 *
 * This assumes the strength is at least 3. Otherwise use the jstruct_t object
 */
std::vector< int > Fvalues (int number_of_columns) const;
/** Calculate F-values of a conference design
 *
 * \param number_of_columns Number of columns to use
 * \return The Fk vector with k the number of columns specified
 *
 **/
std::vector< int > FvaluesConference (int number_of_columns) const;
/** Calculate the Jk-characteristics of the matrix (the values are signed)
 *
 * \param jj Number of columns to use
 * \returns Vector with calculated Jk values
 */
std::vector< int > Jcharacteristics (int jj = 4) const;
/// Calculate the projective estimation capacity sequence
std::vector< double > PECsequence (int verbose = 0) const;
/// Calculate the projective information capacity sequence
std::vector< double > PICsequence(int verbose = 0) const;
/// calculate rank of array
int rank () const;
/** Calculate generalized wordlength pattern
 *
 * @see ::GWLP
 */
std::vector< double > GWLP (int truncate = 1, int verbose = 0) const;
/// calculate strength of an array
int strength () const;
/// return true if the array is a foldover array
bool foldover () const;
/// return value of the minimum element in the array
array_t min () const;
/// return value of the maximum element in the array
array_t max () const;
/** Calculate centered L2 discrepancy
 *
 * The method is from "A connection between uniformity and aberration in regular fractions of two-level factorials", Fang and Mukerjee, 2000
 */
double CL2discrepancy () const;
/// apply a random permutation of rows, columns and levels of an orthogonal array
array_link randomperm () const;
/// apply a random permutation of columns of an orthogonal array
array_link randomcolperm () const;
/// apply a random permutation of rows of an orthogonal array
array_link randomrowperm () const;
/** Calculate model matrix of an orthogonal array
 *
 * \param order For 0 return only the intercept; for 1 return intercept and main effects; for 2 return intercept, main effects and interaction effects.
 * \param intercept If 1, then include the intercept in the output.
 * \param verbose Verbosity level
 * \return Calculated model matrix
 *
 * This function uses @ref array2eigenModelMatrixMixed for the calculation.
 */
MatrixFloat getModelMatrix (int order, int intercept = 1, int verbose = 0) const;
/// assignment operator
array_link &operator= (const array_link &rhs);
/// assign a deep copy of the rhs array
array_link &deepcopy (const array_link &rhs);
/// assign a shallow copy of the rhs array (NOTE(review): presumably shares the data pointer — confirm against implementation)
array_link &shallowcopy (const array_link &rhs);
/** @brief Return True if both arrays are equal
 *
 * \param rhs Array to compare to
 * \returns 1 if arrays are equal. 0 otherwise. Returns 0 if arrays have different sizes
 */
int operator== (const array_link &rhs) const;
/// inverse of operator==
int operator!= (const array_link &rhs) const;
/// comparison operator (ordering defined by the implementation)
int operator< (const array_link &rhs) const;
/// comparison operator (ordering defined by the implementation)
int operator> (const array_link &rhs) const;
/// return true if two arrays have the same dimensions
int equalsize(const array_link &rhs) const;
/// elementwise addition
array_link operator+ (const array_link &) const;
/// elementwise addition
array_link operator+ (array_t value) const;
/// elementwise subtraction
array_link operator- (const array_link &) const;
/// elementwise subtraction
array_link operator- (array_t value) const;
/// elementwise multiplication
array_link operator* (const array_link &rhs) const;
array_link operator* (array_t value) const;
array_link operator*= (array_t value);
array_link operator+= (array_t value);
array_link operator-= (array_t value);
/// get element from array, no error checking, inline version
inline const array_t &atfast (const rowindex_t r, const colindex_t c) const {
return this->array[r + this->n_rows * c];
}
/// get element from array, no error checking, inline version
inline array_t &atfast (const rowindex_t r, const colindex_t c) { return this->array[r + this->n_rows * c]; }
/// get element at specified position, no bounds checking
array_t _at (const rowindex_t, const colindex_t) const;
/// get element at specified position, no bounds checking
array_t _at (const int index) const;
/// get element at specified position
array_t at (const rowindex_t, const colindex_t) const;
/// get element at specified position
array_t at (const int index) const;
/// get element at specified position
array_t &at (const rowindex_t, const colindex_t);
/// set all elements in the array to a value
void setconstant (array_t value);
/// set value of an array
void setvalue (int row, int col, int value);
/// set value of an array
void setvalue (int row, int col, double value);
/// set value of an array, no bounds checking!
void _setvalue (int row, int col, int value);
/// multiply a row by -1
void negateRow (rowindex_t row);
/// print information about array
void show () const;
/// return string describing the array
std::string showstr () const;
/// return md5 sum of array representation (as represented with 32bit int datatype in memory)
std::string md5 () const;
/// return true if two columns are equal
bool columnEqual (int column_index, const array_link &rhs, int column_index_rhs) const;
/// return index of first different column
int firstColumnDifference (const array_link &A) const;
/** Calculate row and column index of first difference between two arrays
 *
 * The difference is according to the column-major ordering.
 */
bool firstDiff(const array_link &A, int &r, int &c, int verbose = 1) const;
/// create root in arraylink
void create_root (const arraydata_t &arrayclass, int fill_value = 0);
/// return fraction of nonzero elements in array
double nonzero_fraction () const;
/// fill array with zeros
void clear();
// getarraydata (Python interface). this needs to be of type int32 (default python int type)
void getarraydata (int *pymat1, int n) { std::copy (this->array, this->array + n, pymat1); }
/// internal function: copy n elements from a raw buffer into the array (warns if n does not match the array size)
template < class numtype > void setarraydata (const numtype *tmp, int n) {
if (n != this->n_rows * this->n_columns)
myprintf ("array_link:setarraydata: warning: number of elements incorrect: n %d, %d %d\n", n,
this->n_rows, this->n_columns);
std::copy (tmp, tmp + n, this->array);
}
/// internal function: copy n elements from a row-major buffer into the column-major array storage
template < class numtype > void setarraydata_transposed (const numtype *input_data, int n) {
if (n != this->n_rows * this->n_columns)
myprintf ("array_link:setarraydata: warning: number of elements incorrect: n %d, %d %d\n", n,
this->n_rows, this->n_columns);
int i = 0;
for (int row = 0; row < this->n_rows; row++) {
for (int col = 0; col < this->n_columns; col++) {
this->array[row + col * this->n_rows] = input_data[i];
i++;
}
}
}
/// special method for SWIG interface
void setarraydata (std::vector< int > tmp, int n) { std::copy (tmp.begin (), tmp.begin () + n, this->array); }
/// internal function: copy the first n elements of the vector into the array (no size check)
template < class numtype > void setarraydata (std::vector< numtype > tmp, int n) {
std::copy (tmp.begin (), tmp.begin () + n, this->array);
}
/// set column to values
void setcolumn (int target_column, const array_link &source_array, int source_column = 0) const;
public:
void init (rowindex_t r, colindex_t c); // made public for python interface
/// return the row_symmetry group of an array
symmetry_group row_symmetry_group () const;
/// return the LMC form of the array
array_link reduceLMC () const;
/// return the delete-one-factor-projection form of the array
array_link reduceDOP () const;
/// return the array as an Eigen matrix
MatrixFloat getEigenMatrix() const;
/// return true if the specified column compares greater than a column in another array (NOTE(review): original comment said "smaller", but the name suggests greater-than — confirm against the implementation)
int columnGreater (int c1, const array_link &rhs, int rhs_column) const;
void debug () const;
#ifdef SWIGCODE
void *data (); ///< return pointer to data, needed for swig interface
#endif
private:
/// return true if both arrays have the same size
bool equal_size(const array_link &array) const;
/// return true if the row/column pair is a valid position in the array
bool _valid_index (const rowindex_t r, const colindex_t c) const;
/// return true if the linear index is a valid position in the array
bool _valid_index (int index) const;
};
#ifdef SWIGCODE
/// Create array_link from numpy array
array_link create_array_link(long* pymatinput, int number_of_rows, int number_of_columns);
/// Update the data of an array_link with the specified data
void update_array_link(array_link &al, long* pymatinput, int number_of_rows, int number_of_columns);
#endif
/** Return -1 if the first array is smaller in LMC ordering than the second array, 0 if equal and 1 otherwise **/
int compareLMC(const array_link &lhs, const array_link &rhs);
/** Return example array
 *
 * \param idx Index of example array to return
 * \param verbose If True, then print information about the array to stdout
 */
array_link exampleArray(int idx = 0, int verbose = 0);
/** Calculate Jk-characteristics for a conference design
 *
 * \param array Conference design
 * \param number_of_columns Specifies the number of columns to use
 * \param verbose Verbosity level
 * \return A vector of calculated inner products between all combinations of k columns.
 */
std::vector< int > Jcharacteristics_conference(const array_link &array, int number_of_columns, int verbose = 0);
/// data type for elements of conference designs
typedef signed char conf_t;
/// data type for column of a conference design
typedef std::vector< conf_t > conference_column;
/// list of columns of conference designs
typedef std::vector< conference_column > conference_column_list;
/// concatenate 2 arrays (NOTE(review): original comment said "vertical direction", but the identically-typed overload below is documented as horizontal — confirm)
array_link hstack (const array_link &array1, const array_link &array2);
/// concatenate array and conference_column
array_link hstack (const array_link &array, const conference_column &column);
/// concatenate 2 arrays in horizontal direction
array_link hstack (const array_link &array_left, const array_link &array_right);
/// concatenate the last column of array B to array A
array_link hstacklastcol (const array_link &A, const array_link &B);
/// concatenate two columns
conference_column vstack(const conference_column &column_top, const conference_column &column_bottom);
/// perform column permutation for an array
void perform_column_permutation (const array_link source, array_link &target, const std::vector< int > perm);
/// perform row permutation for an array
void perform_row_permutation (const array_link source, array_link &target, const std::vector< int > perm);
/** create arraydata_t structure from array
 *
 * \param array Array to use as input specification for array class
 * \param extracols Number of extra columns to add to the number of columns of the array
 * \param strength Strength to set in the array class. If -1, then use the strength of the array
 */
arraydata_t arraylink2arraydata (const array_link &array, int extracols = 0, int strength = 2);
/// container with arrays
typedef std::deque< array_link > arraylist_t;
/// add a constant value to all arrays in a list
arraylist_t addConstant (const arraylist_t &lst, int value);
/** Return number of arrays with j_{2n+1}=0 for number_of_arrays<m */
std::vector< int > getJcounts (arraylist_t *arraylist, int N, int k, int verbose = 1);
/**
 * @brief struct to hold data of an array, e.g. J-characteristic. Abstract base class
 *
 */
class jstructbase_t {
public:
/// calculated J-characteristics
std::vector< int > values;
/// possible values for Jk-characteristics
std::vector< int > jvalues;
/// map from Jk-value to index in the jvalues variable
std::map< int, int > jvalue2index;
/// number of columns
int jj;
public:
/// calculate maximum J value
int maxJ () const;
/// calculate possible values in F vector
std::vector< int > Jvalues () const { return this->jvalues; }
/** Calculate histogram of J values
 *
 * \return Histogram of J values
 *
 * The histogram bins are given by the values of @ref Jvalues.
 *
 **/
std::vector< int > calculateF () const;
/// Calculate the J-values for a given array
virtual void calc (const array_link &array) = 0;
/// Show contents of structure
void show ();
/// Show the calculated data
void showdata (int verbose = 1);
/// Return string describing the structure
std::string showstr ();
/// return 1 if all vals are zero
// NOTE(review): this loop checks jvalues (the *possible* J-values), not values (the calculated ones); that looks inconsistent with the comment above — confirm intended behavior before relying on it
int allzero () {
for (size_t i = 0; i < this->jvalues.size (); ++i) {
if (this->jvalues[i] != 0) {
return 0;
}
}
return 1;
}
};
/// structure containing data related to symmetries of arrays
struct symmdata {
public:
/// row value data used for the symmetry checks (filled by the constructor)
array_link rowvalue;
/// the original array
array_link orig;
/// auxiliary table computed by the constructor (NOTE(review): exact contents defined elsewhere — confirm)
array_link ft;
/// construct symmetry data for the specified array
symmdata (const array_link &al, int minlen = 1);
/// print the row values (and, for verbose >= 2, the ft table) to stdout
void show (int verbose = 1) const {
myprintf ("symmdata: rowvalues\n");
this->rowvalue.showarray ();
if (verbose >= 2) {
myprintf ("symmdata: ft:");
this->ft.show ();
this->ft.showarray ();
}
}
/// list with indices set to check for symmetry reductions
std::vector< int > checkIdx (int col = -1) const {
const int N = this->orig.n_rows;
// default to the last column of the original array
if (col < 0) {
col = orig.n_columns - 1;
}
std::vector< int > idx (N);
// never check first index
// mark each row whose value equals that of the previous row; only those rows need a symmetry check
for (int row = 1; row < N; row++) {
if (this->rowvalue._at (row, col) == this->rowvalue._at (row - 1, col)) {
idx[row] = 1;
}
}
return idx;
}
};
/**
 * @brief struct to hold data of an array, e.g. J-characteristic, rank
 *
 * See papers: Minimum G2-aberration properties of two-level foldover designs, Butler, 2004
 * Design Selection and Classification for Hadamard Matrices Using Generalized Minimum Aberration Criteria,
 * Deng and Tang
 *
 */
class jstruct_t {
public:
/// number of rows in array
int N;
/// number of columns in array
int k;
/// J-characteristic that is calculated
int jj;
/// number of column combinations possible
int nc;
/// contains calculated J-values
std::vector< int > values;
/// calculated aberration (member name keeps the historical spelling "abberration")
double abberration;
public:
/// Create an object to calculate J-characteristics
jstruct_t ();
/// Create an object to calculate J-characteristics
jstruct_t (const array_link &al, int jj = 4);
/// @copydoc jstruct_t::jstruct_t()
jstruct_t (const int N, const int K, const int jj = 4);
/// @copydoc jstruct_t::jstruct_t()
jstruct_t (const jstruct_t &js);
~jstruct_t ();
public:
/// assignment operator
jstruct_t &operator= (const jstruct_t &rhs);
/// calculate maximum J value
int maxJ () const;
/// Calculate the number of possible J values that can occur for the given strength
int number_J_values(int strength) const;
/** Calculate possible values in F vector
 *
 * \param strength Strength to use
 * \return Vector with possible Jk values (ordered from high to low)
 *
 */
std::vector< int > Fval (int strength = 3) const;
/// calculate histogram of J values for a 2-level array
std::vector< int > calculateF (int strength = 3) const;
/** Calculate aberration value
 *
 * This is equal to the sum of the squares of all Jk values, divided by the number of rows squared.
 *
 * The calculated aberration is stored in the variable abberration.
 **/
void calculateAberration();
/// Show contents of structure
void show () const;
/// Show the calculated data
void showdata ();
/// Return string describing the structure
std::string showstr ();
/// return 1 if all J values are zero, otherwise return 0
int allzero() const;
private:
/// init data structures
void init(int N, int k, int jj);
/// calculate J-characteristics of a 2-level array
void calc(const array_link &al);
/// calculate J-characteristics of a 2-level array, special function for jj=4
void calcj4(const array_link &al);
/// calculate J-characteristics of a 2-level array, special function for jj=5
void calcj5(const array_link &al);
};
/** Calculate J-characteristics of conference designs
 *
 **/
class jstructconference_t : public jstructbase_t {
public:
/** Create structure to calculate J-characteristics of conference designs
 *
 * \param N Number of rows
 * \param jj Number of columns to use for the Jk-characteristics
 **/
jstructconference_t (int N, int jj = 4) {
this->jj = jj;
calcJvalues (N, jj);
}
/** Calculate J-characteristics of a conference design
 *
 * \param array Array to calculate the J-characteristics for
 * \param jj Number of columns to use for the Jk-characteristics
 **/
jstructconference_t (const array_link &array, int jj = 4) {
this->jj = jj;
const int N = array.n_rows;
calcJvalues (N, jj);
calc (array);
}
private:
/// determine the set of possible J-values for the given parameters
void calcJvalues(int N, int jj);
/// override of jstructbase_t::calc for conference designs
void calc(const array_link &al);
};
/// set first columns of an array to root form
void create_root (array_t *array, const arraydata_t *arrayclass);
/// Creates the root of an orthogonal array. The root is appended to the list of arrays
void create_root (const arraydata_t *arrayclass, arraylist_t &solutions);
/// Compare 2 arrays and return position of first difference (via the rpos and cpos output parameters)
int array_diff (carray_p A, carray_p B, const rowindex_t r, const colindex_t c, rowindex_t &rpos, colindex_t &cpos);
/// helper function to calculate J-values
inline void fastJupdate (const array_t *array, rowindex_t N, const int J, const colindex_t *column_indices, array_t *tmp) {
for (int i = 0; i < J; i++) {
carray_t *cp = array + N * column_indices[i];
for (rowindex_t r = 0; r < N; r++) {
tmp[r] += cp[r];
}
}
return;
}
/** Calculate J-value for a 2-level array
 *
 * \param array Array to analyse
 * \param J Number of columns in the combination
 * \param column_indices Indices of the columns to use
 */
int jvalue (const array_link &array, const int J, const int *column_indices);
/** Calculate J-value for a column combination of a 2-level array
 *
 * We assume the array has values 0 and 1. No boundary checks are performed.
 */
int jvaluefast (const array_t *array, rowindex_t N, const int J, const colindex_t *column_indices);
/// Analyse a list of arrays
std::vector< jstruct_t > analyseArrays (const arraylist_t &arraylist, const int verbose, const int jj = 4);
/** \brief Contains a transformation of an array
 *
 * Contains an array transformation. The transformation consists of column, row and
 * level permutations. The level and column permutations are not commutative (since the level permutations
 * are tied to a particular column). We apply the column permutations first.
 *
 */
class array_transformation_t {
public:
/// row permutation
rowperm_t rperm;
/// column permutation
colperm_t cperm;
/// level permutations
levelperm_t *lperms;
/// type of array
const arraydata_t *ad;
public:
/// create a transformation for the specified array class
array_transformation_t (const arraydata_t *arrayclass);
/// @copydoc array_transformation_t::array_transformation_t(const arraydata_t *)
array_transformation_t (const arraydata_t &arrayclass);
/// default constructor
array_transformation_t ();
/// copy constructor
array_transformation_t (const array_transformation_t &transformation);
/// assignment operator
array_transformation_t &operator= (const array_transformation_t &at);
~array_transformation_t ();
/// show the array transformation
void show () const;
/// return true if the transformation is equal to the identity
bool isIdentity () const;
/// return the inverse transformation
array_transformation_t inverse () const;
/// reset the transformation to the identity transformation
void reset ();
/// initialize to a random transformation
void randomize ();
/// initialize with a random column permutation
void randomizecolperm ();
/// initialize with a random row permutation
void randomizerowperm ();
/// apply transformation to an array_link object
array_link apply(const array_link &array) const;
/// Comparison operator
int operator== (const array_transformation_t &t2) const;
/// composition operator. the transformations are applied from the left
array_transformation_t operator* (const array_transformation_t b) const;
/// apply transformation to an array (inplace)
void apply (array_t *sourcetarget) const;
/// apply transformation to an array
void apply (const array_t *source, array_t *target) const;
/// apply transformation and show resulting array
void print_transformed (carray_t *source) const;
/// show the array transformation on the specified output stream
void show (std::ostream &out) const;
/// return the row permutation of the transformation
std::vector< int > rowperm () const;
/// return the column permutation of the transformation
std::vector< int > colperm () const;
/// return the level permutations of the transformation
std::vector< int > lvlperm (int c) const;
/// set the row permutation of the transformation
void setrowperm (std::vector< int > row_permutation);
/// set the column permutation of the transformation
void setcolperm (std::vector< int > column_permutation);
/// set the level permutation of the transformation
void setlevelperm (int column_index, std::vector< int > lvl_permutation);
private:
/// initialize permutation structures
void allocate_data_structures ();
/// free permutation structures and arraydata_t structure
void free_data_structures ();
};
/** \brief Contains a transformation of a conference matrix
 *
 * Contains an array transformation. The transformation consists of column permutations, row permutations and sign
 * switches for both the rows and columns.
 *
 * The sign switches and the permutations are not commutative. We apply the permutations first and then the sign flips.
 *
 */
class conference_transformation_t {
public:
/// row permutation of the transformation
std::vector< int > rperm;
/// column permutation of the transformation
std::vector< int > cperm;
/// sign flips for the columns
std::vector< int > cswitch;
/// sign flips for the rows
std::vector< int > rswitch;
/// number of rows
int nrows;
/// number of columns
int ncols;
public:
conference_transformation_t (); ///< default constructor
/// create a transformation for a conference design of the specified size
conference_transformation_t (int nrows, int ncols);
/// create a transformation sized for the specified array
conference_transformation_t (const array_link &al);
/// copy constructor
conference_transformation_t (const conference_transformation_t &T);
/// show the array transformation
void show (int verbose = 1) const;
/// return true if the transformation is equal to the identity
bool isIdentity () const;
/// return the inverse transformation
conference_transformation_t inverse () const;
/// reset the transformation to the identity transformation
void reset ();
/// initialize to a random transformation
void randomize ();
/// initialize with a random column permutation
void randomizecolperm ();
/// initialize with a random row permutation
void randomizerowperm ();
/// initialize with random col switches
void randomizecolflips ();
/// initialize with random row switches
void randomizerowflips ();
/// apply transformation to an array_link object
array_link apply (const array_link &al) const;
/// Comparison operator
int operator== (const conference_transformation_t &rhs) const;
/** composition operator. the transformations are applied from the left
 *
 * E.g. (T1*T2)(x) = T1(T2(x))
 *
 */
conference_transformation_t operator* (const conference_transformation_t &rhs) const;
/// set the row permutation of the transformation
void setrowperm (std::vector< int > rp) { rperm = rp; };
/// set the column permutation of the transformation
void setcolperm (std::vector< int > cp) { cperm = cp; };
private:
void init (int nr, int nc); ///< initialize permutation structures
};
/* functions for working with array files */
/// print a list of arrays to stdout
void showArrayList (const arraylist_t &lst);
#ifdef FULLPACKAGE
namespace arrayfile {
/// file format mode
enum arrayfilemode_t {
/// text based format
ATEXT,
/// write arrays to a text file in a format that can be parsed by LaTeX
ALATEX,
/// binary format
ABINARY,
/// binary format storing differences of arrays
ABINARY_DIFF,
/// binary format storing differences of arrays and zero offsets
ABINARY_DIFFZERO,
/// invalid or error mode
AERROR,
/// automatically determine the format
A_AUTOMATIC,
/// automatically determine the format (but binary)
A_AUTOMATIC_BINARY
};
/// file mode for array file
enum afilerw_t { READ, WRITE, READWRITE };
/** @brief Structure for reading or writing a file with arrays
 *
 * The format of the file is determined by the ``arrayfilemode_t``
 * The format described in detail in the documentation of the OApackage https://oapackage.readthedocs.io/en/latest/.
 *
 */
struct arrayfile_t {
public:
/// location of file on disk
std::string filename;
/// True if the file is compressed with gzip
int iscompressed;
/// number of rows of the arrays
int nrows;
/// number of columns of the arrays
int ncols;
/// number of bits used when storing an array
int nbits;
/// file mode, can be ATEXT or ABINARY, ABINARY_DIFF, ABINARY_DIFFZERO
arrayfilemode_t mode;
/// file opened for reading or writing
afilerw_t rwmode;
// we cannot define SWIG variables as int32_t, we get errors in the Python module
/// number of arrays in the file
int narrays;
/// counter of the number of arrays processed so far (see pos())
int narraycounter;
/// maximum number of arrays in structure
static const int NARRAYS_MAX = 2 * 1000 * 1000 * 1000;
public:
/** Structure for reading or writing a file with arrays
 */
arrayfile_t ();
/** @copydoc arrayfile_t::arrayfile_t()
 *
 * \param filename File to open for reading
 * \param verbose Verbosity level
 */
arrayfile_t (const std::string filename, int verbose = 1);
/** @copydoc arrayfile_t::arrayfile_t()
 *
 * Open new array file for writing
 *
 * \param filename File to open
 * \param nrows Number of rows
 * \param ncols Number of columns
 * \param narrays Specify a number of arrays, or -1 to add dynamically
 * \param mode File mode
 * \param number_of_bits Number of bits to use for storage. For 2-level arrays only 1 bit is needed
 */
arrayfile_t (const std::string filename, int nrows, int ncols, int narrays = -1, arrayfilemode_t mode = ATEXT,
int number_of_bits = 8);
/// destructor function, closes all filehandles
~arrayfile_t ();
/// Open a new file for writing and (if opened) close the current file
void createfile (const std::string filename, int nrows, int ncols, int narrays = -1, arrayfilemode_t m = ATEXT,
int number_of_bits = 8);
/// close the array file
void closefile ();
/// return true if file is open
int isopen () const;
/// seek to specified array position
int seek (int pos);
/// read array and return index
int read_array (array_link &a);
/// read next array from the file
array_link readnext ();
/// read a set of arrays from the file
arraylist_t readarrays (int nmax = NARRAYS_MAX, int verbose = 1);
/// flush any open file pointer
void flush ();
/// return true if the file has binary format
bool isbinary () const;
/// append list of arrays to the file
int append_arrays (const arraylist_t &arrays, int startidx = -1);
/// append a single array to the file
void append_array (const array_link &a, int specialindex = -1);
/// Add a comment to an array file (only available in text mode)
void add_comment(const std::string &comment);
/// return True if code is wrapped by SWIG
int swigcheck () const;
/// return string describing the object
std::string showstr () const;
/// return current position in file (expressed as the array counter, not a byte offset)
size_t pos () const { return narraycounter; }
/// return true if the file format has random access mode
bool hasrandomaccess () const { return (this->mode == ABINARY); }
private:
public:
/// file handle for the uncompressed file
FILE *nfid;
#ifdef USEZLIB
/// pointer to compressed file
gzFile gzfid;
#else
/// pointer to compressed file
int gzfid;
#endif
/// verbosity level
int verbose;
private:
/// previously processed array (NOTE(review): presumably used by the binary diff formats — confirm)
array_link diffarray;
/// return header size for binary format array
int headersize () const;
/// return size of bit array
int barraysize () const;
/// wrapper function for fwrite or gzwrite
size_t afwrite (void *ptr, size_t t, size_t n);
/// wrapper function for fread or gzread
size_t afread (void *ptr, size_t sz, size_t cnt);
public:
// update numbers count for a file structure
void updatenumbers ();
/// read array and return index
int read_array (array_t *array, const int nrows, const int ncols);
/// finalize the array file (implementation defined elsewhere)
void finisharrayfile();
/// set verbosity level
void setVerbose(int v);
private:
int read_array_binary_zero (array_link &a);
void write_array_binary (carray_t *array, const int nrows, const int ncols);
void write_array_binary (const array_link &A);
/** Write an array in binary diff mode to a file
 *
 * We only write the section of columns of the array that differs from the previous array.
 */
void write_array_binary_diff (const array_link &A);
/** Write an array in binary diffzero mode */
void write_array_binary_diffzero (const array_link &A);
public:
/// return the number of bits used for storage
int getnbits ();
/// parse string to determine the file mode
static arrayfile::arrayfilemode_t parseModeString (const std::string format);
/// return number of bits necessary to store an array
static int arrayNbits (const arraydata_t &ad) {
int m = 0;
// find the maximum factor level over all columns
for (int i = 0; i < ad.ncols; ++i) {
if (ad.s[i] > m) {
m = ad.s[i];
}
}
if (m == 2) {
return 1; // bit
} else if (m < 120) { // NOTE(review): threshold is 120 here but 124 in the array_link overload below — confirm the difference is intended
return 8; // char
} else {
return 32; // int32_t
}
}
/// return number of bits necessary to store an array
static int arrayNbits (const array_link &A) {
int m = A.max ();
int amin = A.min ();
// ensure the magnitude of negative values is also representable
m = std::max (m, -amin + 1);
if (m == 1) {
return 1; // bit
} else if (m < 124) {
return 8; // char
} else {
return 32; // int32_t
}
};
protected:
void writeheader ();
/// Read a binary array from a file
void read_array_binary (array_t *array, const int nrows, const int ncols);
};
}
using namespace arrayfile;
/// return number of arrays in an array file
long nArrays (const char *fname);
/** return information about file with arrays
 *
 * \param filename Filename of array file
 * \param number_of_arrays Variable is set with number of arrays
 * \param number_of_rows Variable is set with number of rows
 * \param number_of_columns Variable is set with number of columns
 */
void arrayfileinfo(const char *filename, int &number_of_arrays, int &number_of_rows, int &number_of_columns);
/** Read all arrays in a file
 *
 * @param fname Filename to read from
 * @param verbose Verbosity level
 * @param setcols Pointer to return number of columns from array file
 * @return List of arrays
 */
arraylist_t readarrayfile (const char *fname, int verbose = 1, int *setcols = 0);
/** Read all arrays in a file and append them to an array list
 *
 * @param filename Filename to read from
 * @param arraylist Pointer to list of arrays
 * @param verbose Verbosity level
 * @param setcols Pointer that is set with the number of columns from the file
 * @param setrows Pointer that is set with the number of rows from the file
 * @param setbits Pointer that is set with the number of bits from the file
 * @return
 */
int readarrayfile(const char *filename, arraylist_t *arraylist, int verbose = 1, int *setcols = 0,
int *setrows = 0, int *setbits = 0);
/// constant indicating the number of rows/columns should be determined automatically
const int NRAUTO = 0;
/** Write a list of arrays to file on disk
 *
 * @param filename Filename to use
 * @param arraylist List of arrays to write
 * @param mode Mode for the file with designs
 * @param nrows If the list of arrays is empty, use this number of rows for the design file
 * @param ncols If the list of arrays is empty, use this number of columns for the design file
 * @return Value zero if successful
 */
int writearrayfile (const char *filename, const arraylist_t &arraylist, arrayfile::arrayfilemode_t mode = arrayfile::ATEXT,
int nrows = NRAUTO, int ncols = NRAUTO);
/// Write a single array to file
int writearrayfile (const char *filename, const array_link &array, arrayfile::arrayfilemode_t mode = arrayfile::ATEXT);
/// Append a single array to an array file. creates a new file if no file exists
int append_arrayfile (const char *filename, const array_link array);
/// Make a selection of arrays from binary array file, append to list
void selectArrays (const std::string filename, std::vector< int > &idx, arraylist_t &fl, int verbose = 0);
/// Select a single array from a file
array_link selectArrays (std::string filename, int index);
#endif // FULLPACKAGE
/// Make a selection of arrays
arraylist_t selectArrays (const arraylist_t &input_list, std::vector< int > &idx);
/// Make a selection of arrays
arraylist_t selectArrays (const arraylist_t &input_list, std::vector< long > &idx);
/// Make a selection of arrays, append to list
void selectArrays (const arraylist_t &input_list, std::vector< int > &idx, arraylist_t &output_list);
/// Make a selection of arrays, append to list
void selectArrays (const arraylist_t &input_list, std::vector< long > &idx, arraylist_t &output_list);
/// From a container keep all elements with specified indices
/// From a container keep only the elements whose index flag is non-zero.
template < class Container, class IntType > void keepElements (Container &al, std::vector< IntType > &idx) {
        // Walk backwards so erase() does not shift positions we still have to visit.
        for (int position = static_cast< int > (idx.size ()) - 1; position >= 0; position--) {
                if (idx[position] == 0) {
                        al.erase (al.begin () + position);
                }
        }
}
/// From a container remove all elements with specified indices
/// From a container remove every element whose index flag is non-zero.
template < class Container, class IntType > void removeElements (Container &al, std::vector< IntType > &idx) {
        // Iterate from the back so earlier positions stay valid after erase().
        for (int position = static_cast< int > (idx.size ()) - 1; position >= 0; position--) {
                if (idx[position] != 0) {
                        al.erase (al.begin () + position);
                }
        }
}
/// Make a selection of arrays from a list, append to list
template < class MType >
void selectArraysMask (const arraylist_t &al, std::vector< MType > &mask, arraylist_t &rl) {
myassert (al.size () == mask.size ());
for (int idx = 0; idx < al.size (); idx++) {
if (mask[idx]) {
rl.push_back (al.at (idx));
}
}
}
/// Append selection of arrays to existing list
template < class IndexType >
void appendArrays (const arraylist_t &al, const typename std::vector< IndexType > &idx, arraylist_t &lst) {
for (typename std::vector< IndexType >::const_iterator it = idx.begin (); it < idx.end (); ++it) {
lst.push_back (al.at (*it));
}
}
/// Append set of arrays to existing list
void appendArrays(const arraylist_t &arrays_to_append, arraylist_t &dst);
/** Write a formatted array
*/
/** Write an array in text format to stdout.
 *
 * The array is stored column-major: element (j, k) is at array[j + k * nrows].
 *
 * \param array Pointer to the array data (column-major)
 * \param nrows Number of rows
 * \param ncols Number of columns
 * \param width Minimum field width used for each printed element
 */
template < class atype >
void write_array_format (const atype *array, const int nrows, const int ncols, int width = 3) {
        int count;
        for (int j = 0; j < nrows; j++) {
                count = j;
                for (int k = 0; k < ncols; k++) {
                        const char *s = (k < ncols - 1) ? " " : "\n";
                        // Fix: honour the width argument. It was previously ignored and the
                        // field width was hard-coded to 3 (the default), so callers passing a
                        // different width got no effect.
                        myprintf ("%*i%s", width, static_cast< int > (array[count]), s);
                        count += nrows;
                }
        }
#ifdef FULLPACKAGE
        fflush (stdout);
        setbuf (stdout, NULL);
#endif
}
/** @brief Write an array to a file pointer
*/
/** @brief Write an array in text format to an open file pointer.
 *
 * The array is stored column-major: element (j, k) is at array[j + k * nrows].
 */
template < class atype > void write_array_format (FILE *fid, const atype *array, const int nrows, const int ncols) {
        for (int row = 0; row < nrows; row++) {
                int offset = row; // column-major: step by nrows per column
                for (int col = 0; col < ncols; col++) {
                        const char *separator = (col == ncols - 1) ? "\n" : " ";
                        fprintf (fid, "%3i%s", static_cast< int > (array[offset]), separator);
                        offset += nrows;
                }
        }
}
/// write an array in latex style
/// Write an array as a LaTeX tabular environment to the given stream.
template < class atype >
void write_array_latex (std::ostream &ss, const atype *array, const int nrows, const int ncols) {
        // Header: one centered column per array column.
        ss << "\\begin{tabular}{";
        for (int col = 0; col < ncols; col++) {
                ss << 'c';
        }
        ss << "}" << std::endl;
        for (int row = 0; row < nrows; row++) {
                int offset = row; // column-major storage: step by nrows per column
                for (int col = 0; col < ncols; col++) {
                        const char *separator = (col == ncols - 1) ? " \\\\ \n" : " & ";
                        ss << array[offset] << separator;
                        offset += nrows;
                }
        }
        ss << "\\end{tabular}" << std::endl;
}
/** Convert a file with arrays to a different format
*/
void convert_array_file(std::string input_filename, std::string output_filename, arrayfile::arrayfilemode_t output_format, int verbose = 0);
/// structure to write arrays to disk, thread safe
struct arraywriter_t {
public:
/** Pointers to different data files.
*
* Since depth_extend is a depth first approach we need to store arrays with a different number of columns
**/
std::vector< arrayfile_t * > afiles;
/// only write arrays if this variable is true
bool writearrays;
/// number of arrays written to disk
int nwritten;
/// verbosity level
int verbose;
public:
arraywriter_t () {
writearrays = true;
verbose = 1;
};
~arraywriter_t () {
flush ();
closeafiles ();
}
/// flush all output files
void flush () {
for (size_t i = 0; i < afiles.size (); i++) {
arrayfile_t *af = afiles[i];
if (af != 0) {
#pragma omp critical
af->updatenumbers ();
af->flush ();
}
}
}
/// write a single array to disk
void writeArray (const array_link &A) {
// writing arrays with multiple threads at the same time is not supported
#ifdef DOOPENMP
#pragma omp critical
#endif
{
int i = A.n_columns;
if (writearrays) {
if (i < (int)afiles.size () && i >= 0) {
afiles[i]->append_array (A);
} else {
fprintf (stderr, "depth_extend_t: writeArray: problem: array file for %d "
"columns was not opened\n",
(int)i);
}
nwritten++;
}
}
}
/// write a list of arrays to disk
void writeArray (const arraylist_t &lst) {
for (size_t j = 0; j < lst.size (); j++) {
const array_link &A = lst[j];
writeArray (A);
}
}
/// initialize the result files
void initArrayFiles (const arraydata_t &ad, int kstart, const std::string prefix,
arrayfilemode_t mode = ABINARY_DIFF) {
afiles.clear ();
afiles.resize (ad.ncols + 1);
nwritten = 0;
for (size_t i = kstart; i <= (size_t)ad.ncols; i++) {
arraydata_t ad0 (&ad, i);
std::string afile = prefix + "-" + ad0.idstr () + ".oa";
if (verbose >= 3)
myprintf ("depth_extend_t: creating output file %s\n", afile.c_str ());
int nb = arrayfile_t::arrayNbits (ad);
afiles[i] = new arrayfile_t (afile, ad.N, i, -1, mode, nb);
}
}
/// return the total number arrays written to disk
int nArraysWritten () const { return nwritten; }
public:
void closeafiles () {
for (size_t i = 0; i < afiles.size (); i++) {
delete afiles[i];
}
afiles.clear ();
}
};
/** Read header for binary data file. Return true if valid header file
*
* The header consists of 4 integers: 2 magic numbers, then the number of rows and columns
*/
bool readbinheader(FILE *fid, int &nr, int &nc);
/// Write header for binary data file
void writebinheader(FILE *fid, int number_rows, int number_columns);
/// Write a vector of numeric elements to binary file as double values
template < class Type >
void vector2doublebinfile (const std::string fname, std::vector< Type > vals, int writeheader = 1) {
FILE *fid = fopen (fname.c_str (), "wb");
if (fid == 0) {
fprintf (stderr, "doublevector2binfile: error with file %s\n", fname.c_str ());
throw_runtime_exception("doublevector2binfile: error with file");
}
if (writeheader) {
writebinheader (fid, vals.size (), 1);
}
for (unsigned int i = 0; i < vals.size (); i++) {
double x = vals[i];
fwrite (&x, sizeof (double), 1, fid);
}
fclose (fid);
}
/// Write a vector of vector elements to binary file
void vectorvector2binfile(const std::string fname, const std::vector< std::vector< double > > vals,
int writeheader, int na);
/** Convert 2-level array to main effects in Eigen format
*
* \param array Array to convert
* \param intercept If True, then include the intercept
* \returns The main effects model
*/
MatrixFloat array2eigenX1 (const array_link &array, int intercept = 1);
/** Convert 2-level array to second order interaction matrix in Eigen format
*
* The intercept and main effects are not included.
*
* \param array Array to convert
* \returns The second order interaction model
*/
MatrixFloat array2eigenX2 (const array_link &array);
/** Convert 2-level array to second order interaction model matrix (intercept, main effects, interaction effects)
*
* \param array Design of which to calculate the model matrix
* \returns Eigen matrix with the model matrix
*/
MatrixFloat array2eigenModelMatrix (const array_link &array);
/** Create first and second order model matrix for mixed-level orthogonal array
*
* \param array Input array
* \param verbose Verbosity level
* \returns Pair with main effects and two-factor interaction model
*
* For 2-level arrays a direct calculation is used. For mixel-level arrays Helmert contrasts are used.
*/
std::pair< MatrixFloat, MatrixFloat > array2eigenModelMatrixMixed (const array_link &array, int verbose = 1);
/** Calculate number of parameters in the model matrix
*
* A list of integers is returned, with the number of columns in:
*
* - The intercept (always 1)
* - The main effects
* - The interaction effects (second order interaction terms without quadratics)
* - The quadratic effects
*
* \param array Orthogonal array or conference design
* \param order Not used any more
* \returns List of sizes
*/
std::vector< int > numberModelParams(const array_link &array, int order = -1);
/** return index of specified array in a file. returns -1 if array is not found
*
* \param array Array to find
* \param array_file Location if file with arrays
* \param verbose Verbosity level
* \returns Position of array in list
*/
int arrayInFile (const array_link &array, const char *array_file, int verbose = 1);
/** return index of specified array in a list. returns -1 if array is not found
*
* \param array Array to find
* \param arrays List of arrays
* \param verbose Verbosity level
* \returns Position of array in list
*/
int arrayInList (const array_link &array, const arraylist_t &arrays, int verbose = 1);
|
app_baseline.c | #include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "../../support/timer.h"
#include <omp.h>
#define T int64_t
static int pos;
static T *A;
static T *B;
static T *C;
static T *C2;
// Create a "test file"
// Allocate the global buffers A, B and C and fill A with test data.
// Returns A. Exits on allocation failure.
static T *create_test_file(unsigned int nr_elements) {
    A = malloc(nr_elements * sizeof(T));
    B = malloc(nr_elements * sizeof(T));
    C = malloc(nr_elements * sizeof(T));
    // Fix: the original wrote to the buffers without checking the allocations.
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "create_test_file: allocation of %u elements failed\n", nr_elements);
        exit(EXIT_FAILURE);
    }
    printf("nr_elements\t%u\t", nr_elements);
    // Pattern 0, 2, 2, 4, 4, ...: adjacent duplicates exercise the unique kernel.
    // (Loop index is unsigned to match nr_elements and avoid a signed/unsigned
    // comparison.)
    for (unsigned int i = 0; i < nr_elements; i++) {
        A[i] = i % 2 == 0 ? i : i + 1;
        B[i] = 0;
    }
    return A;
}
// Compute output in the host
// Stream-compact A into C with t OpenMP threads, keeping only elements that
// differ from their left neighbour (the first element of each run of equal
// values). Output order within C is not deterministic across threads.
// Returns the index of the last slot written in C.
static int unique_host(int size, int t) {
    pos = 0;
    C[pos] = A[pos];
    omp_set_num_threads(t);
#pragma omp parallel for
    for (int my = 1; my < size; my++) {
        if (A[my] != A[my - 1]) {
            int p;
            // Fix: the original incremented `pos` atomically but read it back in
            // a separate unsynchronized statement (`p = pos;`), so two threads
            // could observe the same slot (overwriting each other) or skip
            // slots. `atomic capture` makes increment-and-read one atomic op,
            // giving every thread a unique slot.
#pragma omp atomic capture
            p = ++pos;
            C[p] = A[my];
        }
    }
    return pos;
}
// Params
typedef struct Params {
int input_size;
int n_warmup;
int n_reps;
int n_threads;
} Params;
// Print command-line help text to stderr.
void usage() {
    static const char help_text[] =
        "\nUsage: ./program [options]"
        "\n"
        "\nGeneral options:"
        "\n -h help"
        "\n -t <T> # of threads (default=8)"
        "\n -w <W> # of untimed warmup iterations (default=1)"
        "\n -e <E> # of timed repetition iterations (default=3)"
        "\n"
        "\nBenchmark-specific options:"
        "\n -i <I> input size (default=8M elements)"
        "\n";
    fputs(help_text, stderr);
}
// Parse the command line into a Params struct.
// Exits with status 0 on -h, and with a failure status on an unrecognized
// option.
struct Params input_params(int argc, char **argv) {
    struct Params p;
    p.input_size = 16 << 20;  // default: 16M elements
    p.n_warmup = 1;
    p.n_reps = 3;
    p.n_threads = 8;
    int opt;
    // NOTE(review): the option string accepts "-d <arg>" but no case handles
    // it; kept for compatibility with existing invocations that pass -d.
    while ((opt = getopt(argc, argv, "hd:i:w:e:t:")) >= 0) {
        switch (opt) {
        case 'h':
            usage();
            exit(0);
            break;
        case 'i':
            p.input_size = atoi(optarg);
            break;
        case 'w':
            p.n_warmup = atoi(optarg);
            break;
        case 'e':
            p.n_reps = atoi(optarg);
            break;
        case 't':
            p.n_threads = atoi(optarg);
            break;
        default:
            fprintf(stderr, "\nUnrecognized option!\n");
            usage();
            // Fix: exit with a failure status instead of 0 so callers and
            // scripts can detect the bad invocation.
            exit(EXIT_FAILURE);
        }
    }
    assert(p.n_threads > 0 && "Invalid # of ranks!");
    return p;
}
// Main
// Entry point: build the test input, run the unique kernel once and report
// the resulting count plus the kernel time.
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);
    const unsigned int file_size = p.input_size;
    int total_count;
    // Create an input file with arbitrary data
    // (fills the global A; B and C are allocated as side effects).
    create_test_file(file_size);
    // NOTE(review): p.n_warmup and p.n_reps are parsed but unused here -- the
    // kernel is timed exactly once. (Removed the unused `accum` local.)
    Timer timer;
    start(&timer, 0, 0);
    total_count = unique_host(file_size, p.n_threads);
    stop(&timer, 0);
    printf("Total count = %d\t", total_count);
    printf("Kernel ");
    print(&timer, 0, 1);
    printf("\n");
    free(A);
    free(B);
    free(C);
    return 0;
}
|
GrB_Semiring_wait.c | //------------------------------------------------------------------------------
// GrB_Semiring_wait: wait for a user-defined GrB_Semiring to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GrB_Semiring has no pending
// operations to wait for. All this method does is verify that the semiring is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GrB_Semiring_wait // no work, just check if the GrB_Semiring is valid
(
GrB_Semiring *semiring // semiring to validate; must be non-NULL and point to a valid object
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// Flush first so this thread observes any writes to *semiring made by other
// threads before validating it (per the file header: user-defined semirings
// have no pending operations, so validation + flush is all this does).
#pragma omp flush
GB_WHERE1 ("GrB_Semiring_wait (&semiring)") ;
GB_RETURN_IF_NULL (semiring) ;
GB_RETURN_IF_NULL_OR_FAULTY (*semiring) ;
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
// Second flush makes this thread's view of memory consistent again before
// reporting success.
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
program_evaluator.h | // Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// The ProgramEvaluator runs the cost functions contained in each residual block
// and stores the result into a jacobian. The particular type of jacobian is
// abstracted out using two template parameters:
//
// - An "EvaluatePreparer" that is responsible for creating the array with
// pointers to the jacobian blocks where the cost function evaluates to.
// - A "JacobianWriter" that is responsible for storing the resulting
// jacobian blocks in the passed sparse matrix.
//
// This abstraction affords an efficient evaluator implementation while still
// supporting writing to multiple sparse matrix formats. For example, when the
// ProgramEvaluator is parameterized for writing to block sparse matrices, the
// residual jacobians are written directly into their final position in the
// block sparse matrix by the user's CostFunction; there is no copying.
//
// The evaluation is threaded with OpenMP.
//
// The EvaluatePreparer and JacobianWriter interfaces are as follows:
//
// class EvaluatePreparer {
// // Prepare the jacobians array for use as the destination of a call to
// // a cost function's evaluate method.
// void Prepare(const ResidualBlock* residual_block,
// int residual_block_index,
// SparseMatrix* jacobian,
// double** jacobians);
// }
//
// class JacobianWriter {
// // Create a jacobian that this writer can write. Same as
// // Evaluator::CreateJacobian.
// SparseMatrix* CreateJacobian() const;
//
// // Create num_threads evaluate preparers. Caller owns result which must
// // be freed with delete[]. Resulting preparers are valid while *this is.
// EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
//
// // Write the block jacobians from a residual block evaluation to the
// // larger sparse jacobian.
// void Write(int residual_id,
// int residual_offset,
// double** jacobians,
// SparseMatrix* jacobian);
// }
//
// Note: The ProgramEvaluator is not thread safe, since internally it maintains
// some per-thread scratch space.
#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#ifdef CERES_USE_OPENMP
#include <omp.h>
#else
#include <tbb/tbb.h>
#endif
#include <map>
#include <string>
#include <vector>
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/small_blas.h"
namespace ceres {
namespace internal {
// Default JacobianFinalizer policy: a no-op, used when the Jacobian needs no
// post-processing after evaluation (see the JacobianFinalizer template
// parameter of ProgramEvaluator).
struct NullJacobianFinalizer {
void operator()(SparseMatrix* jacobian, int num_parameters) {}
};
// ProgramEvaluator: evaluates the residual blocks of a Program and scatters
// the results (cost, residuals, gradient, Jacobian) via the EvaluatePreparer
// and JacobianWriter policy classes; see the file header for the interfaces.
// Evaluation runs in parallel: OpenMP when CERES_USE_OPENMP is defined,
// otherwise TBB.
template<typename EvaluatePreparer,
typename JacobianWriter,
typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
public:
// Builds the per-thread preparers and scratch space up front so Evaluate()
// itself performs no allocation.
ProgramEvaluator(const Evaluator::Options &options, Program* program)
: options_(options),
program_(program),
jacobian_writer_(options, program),
evaluate_preparers_(
jacobian_writer_.CreateEvaluatePreparers(options.num_threads)),
// NOTE(review): thread_queue_ is initialized unconditionally here, but the
// member itself is declared only #ifndef CERES_USE_OPENMP (see the member
// list below) -- confirm the OpenMP configuration compiles.
thread_queue_(options.num_threads) {
// #ifndef CERES_USE_OPENMP
// CHECK_EQ(1, options_.num_threads)
// << "OpenMP support is not compiled into this binary; "
// << "only options.num_threads=1 is supported.";
// #endif
BuildResidualLayout(*program, &residual_layout_);
evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
options.num_threads));
}
// Implementation of Evaluator interface.
SparseMatrix* CreateJacobian() const {
return jacobian_writer_.CreateJacobian();
}
// Evaluate the cost and, where the corresponding output pointer is non-NULL,
// the residuals, gradient and Jacobian at `state`. Returns false if any
// residual block fails to evaluate.
bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
const double* state,
double* cost,
double* residuals,
double* gradient,
SparseMatrix* jacobian) {
ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
? "Evaluator::Residual"
: "Evaluator::Jacobian",
&execution_summary_);
// The parameters are stateful, so set the state before evaluating.
if (!program_->StateVectorToParameterBlocks(state)) {
return false;
}
if (residuals != NULL) {
VectorRef(residuals, program_->NumResiduals()).setZero();
}
if (jacobian != NULL) {
jacobian->SetZero();
}
// Each thread gets it's own cost and evaluate scratch space.
for (int i = 0; i < options_.num_threads; ++i) {
evaluate_scratch_[i].cost = 0.0;
if (gradient != NULL) {
VectorRef(evaluate_scratch_[i].gradient.get(),
program_->NumEffectiveParameters()).setZero();
}
}
// This bool is used to disable the loop if an error is encountered
// without breaking out of it. The remaining loop iterations are still run,
// but with an empty body, and so will finish quickly.
bool abort = false;
#ifndef CERES_USE_OPENMP
std::mutex abort_mutex;
#endif
int num_residual_blocks = program_->NumResidualBlocks();
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(options_.num_threads)
for (int i = 0; i < num_residual_blocks; ++i) {
// Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
if (abort) {
continue;
}
int thread_id = omp_get_thread_num();
#else
// TBB path: each worker draws a scratch-slot id from thread_queue_ and
// returns it at the end of the lambda body.
tbb::parallel_for(size_t(0), size_t(num_residual_blocks),
[&](size_t i) {
abort_mutex.lock();
if (abort) {
abort_mutex.unlock();
#ifdef CERES_USE_OPENMP
continue;
#else
return;
#endif
}
abort_mutex.unlock();
int thread_id;
thread_queue_.wait_and_pop(thread_id);
#endif
// int thread_id = 0.0;
// for (int i = 0; i < num_residual_blocks; ++i) {
// abort_mutex.lock();
// if (abort) {
// abort_mutex.unlock();
// continue;
// }
// abort_mutex.unlock();
// From here on the OpenMP and TBB paths share the same body; thread_id
// selects this worker's preparer and scratch slot.
EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
EvaluateScratch* scratch = &evaluate_scratch_[thread_id];
// Prepare block residuals if requested.
const ResidualBlock* residual_block = program_->residual_blocks()[i];
double* block_residuals = NULL;
if (residuals != NULL) {
block_residuals = residuals + residual_layout_[i];
} else if (gradient != NULL) {
block_residuals = scratch->residual_block_residuals.get();
}
// Prepare block jacobians if requested.
double** block_jacobians = NULL;
if (jacobian != NULL || gradient != NULL) {
preparer->Prepare(residual_block,
i,
jacobian,
scratch->jacobian_block_ptrs.get());
block_jacobians = scratch->jacobian_block_ptrs.get();
}
// Evaluate the cost, residuals, and jacobians.
double block_cost;
if (!residual_block->Evaluate(
evaluate_options.apply_loss_function,
&block_cost,
block_residuals,
block_jacobians,
scratch->residual_block_evaluate_scratch.get())) {
#ifdef CERES_USE_OPENMP
// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
// the flush inside the failure case so that there is usually only one
// synchronization point per loop iteration instead of two.
#pragma omp flush(abort)
abort = true;
continue;
#else
abort_mutex.lock();
abort = true;
abort_mutex.unlock();
// Return the scratch slot before bailing out of this lambda.
thread_queue_.push(thread_id);\
return;
#endif
}
scratch->cost += block_cost;
// Store the jacobians, if they were requested.
if (jacobian != NULL) {
jacobian_writer_.Write(i,
residual_layout_[i],
block_jacobians,
jacobian);
}
// Compute and store the gradient, if it was requested.
if (gradient != NULL) {
int num_residuals = residual_block->NumResiduals();
int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
const ParameterBlock* parameter_block =
residual_block->parameter_blocks()[j];
if (parameter_block->IsConstant()) {
continue;
}
// Accumulate J^T * r for this block into the thread-local gradient.
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
block_jacobians[j],
num_residuals,
parameter_block->LocalSize(),
block_residuals,
scratch->gradient.get() + parameter_block->delta_offset());
}
}
#ifndef CERES_USE_OPENMP
thread_queue_.push(thread_id);
#endif
}
#ifndef CERES_USE_OPENMP
);
#endif
if (!abort) {
const int num_parameters = program_->NumEffectiveParameters();
// Sum the cost and gradient (if requested) from each thread.
(*cost) = 0.0;
if (gradient != NULL) {
VectorRef(gradient, num_parameters).setZero();
}
for (int i = 0; i < options_.num_threads; ++i) {
(*cost) += evaluate_scratch_[i].cost;
if (gradient != NULL) {
VectorRef(gradient, num_parameters) +=
VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
}
}
// Finalize the Jacobian if it is available.
// `num_parameters` is passed to the finalizer so that additional
// storage can be reserved for additional diagonal elements if
// necessary.
if (jacobian != NULL) {
JacobianFinalizer f;
f(jacobian, num_parameters);
}
}
return !abort;
}
// Delegate Plus and size queries to the underlying Program.
bool Plus(const double* state,
const double* delta,
double* state_plus_delta) const {
return program_->Plus(state, delta, state_plus_delta);
}
int NumParameters() const {
return program_->NumParameters();
}
int NumEffectiveParameters() const {
return program_->NumEffectiveParameters();
}
int NumResiduals() const {
return program_->NumResiduals();
}
virtual map<string, int> CallStatistics() const {
return execution_summary_.calls();
}
virtual map<string, double> TimeStatistics() const {
return execution_summary_.times();
}
private:
// Per-thread scratch space needed to evaluate and store each residual block.
struct EvaluateScratch {
void Init(int max_parameters_per_residual_block,
int max_scratch_doubles_needed_for_evaluate,
int max_residuals_per_residual_block,
int num_parameters) {
residual_block_evaluate_scratch.reset(
new double[max_scratch_doubles_needed_for_evaluate]);
gradient.reset(new double[num_parameters]);
VectorRef(gradient.get(), num_parameters).setZero();
residual_block_residuals.reset(
new double[max_residuals_per_residual_block]);
jacobian_block_ptrs.reset(
new double*[max_parameters_per_residual_block]);
}
double cost;
scoped_array<double> residual_block_evaluate_scratch;
// The gradient in the local parameterization.
scoped_array<double> gradient;
// Enough space to store the residual for the largest residual block.
scoped_array<double> residual_block_residuals;
scoped_array<double*> jacobian_block_ptrs;
};
// Fill residual_layout with, for each residual block, the offset of that
// block's residuals within the stacked residual vector.
static void BuildResidualLayout(const Program& program,
vector<int>* residual_layout) {
const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
residual_layout->resize(program.NumResidualBlocks());
int residual_pos = 0;
for (int i = 0; i < residual_blocks.size(); ++i) {
const int num_residuals = residual_blocks[i]->NumResiduals();
(*residual_layout)[i] = residual_pos;
residual_pos += num_residuals;
}
}
// Create scratch space for each thread evaluating the program.
static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
int num_threads) {
int max_parameters_per_residual_block =
program.MaxParametersPerResidualBlock();
int max_scratch_doubles_needed_for_evaluate =
program.MaxScratchDoublesNeededForEvaluate();
int max_residuals_per_residual_block =
program.MaxResidualsPerResidualBlock();
int num_parameters = program.NumEffectiveParameters();
EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
for (int i = 0; i < num_threads; i++) {
evaluate_scratch[i].Init(max_parameters_per_residual_block,
max_scratch_doubles_needed_for_evaluate,
max_residuals_per_residual_block,
num_parameters);
}
return evaluate_scratch;
}
Evaluator::Options options_;
Program* program_;
JacobianWriter jacobian_writer_;
scoped_array<EvaluatePreparer> evaluate_preparers_;
scoped_array<EvaluateScratch> evaluate_scratch_;
#ifndef CERES_USE_OPENMP
concurrent_queue<int> thread_queue_;
#endif
vector<int> residual_layout_;
::ceres::internal::ExecutionSummary execution_summary_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
|
conv_direct_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "convolution_param.h"
#include <math.h>
/* Copy an in_h x in_w int8 image into an out_h x out_w buffer, placing the
 * source at offset (top, left) and filling every border cell with value v. */
void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left, int8_t v)
{
    const int8_t* src = input;
    int8_t* dst = output;
    int row = 0;
    /* rows entirely above the image: border value only */
    while (row < top)
    {
        for (int col = 0; col < out_w; col++)
            dst[col] = v;
        dst += out_w;
        row++;
    }
    /* rows containing image data: left border, payload, right border */
    while (row < top + in_h)
    {
        int col = 0;
        while (col < left)
        {
            dst[col] = v;
            col++;
        }
        if (in_w < 12)
        {
            /* narrow rows: element-wise copy avoids memcpy call overhead */
            for (; col < left + in_w; col++)
                dst[col] = src[col - left];
        }
        else
        {
            memcpy(dst + left, src, in_w * sizeof(int8_t));
            col += in_w;
        }
        while (col < out_w)
        {
            dst[col] = v;
            col++;
        }
        src += in_w;
        dst += out_w;
        row++;
    }
    /* rows entirely below the image */
    while (row < out_h)
    {
        for (int col = 0; col < out_w; col++)
            dst[col] = v;
        dst += out_w;
        row++;
    }
}
/* 3x3 stride-1 int8 convolution (NCHW, single batch).
 *
 * Pipeline: pad input -> int32 accumulation per output channel ->
 * bias + dequantize to fp32 -> optional relu/relu6 -> requantize to int8.
 * Channels are processed in parallel with `num_thread` OpenMP threads.
 * Returns 0 on success.
 */
static int conv3x3s1_int8_sse(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor,
struct ir_tensor* output_tensor, struct conv_param* param, int num_thread)
{
int inch = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_hw = inh * inw;
int outch = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_size = output_tensor->elem_num;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
/* NOTE(review): sys_malloc results are used without a NULL check. */
int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
memset(output_int32, 0, out_size * sizeof(int32_t));
float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
int8_t* output_int8 = output_tensor->data;
int8_t* input_int8 = input_tensor->data;
int32_t* bias_int32 = NULL;
if(bias_tensor)
bias_int32 = bias_tensor->data;
/* get scale value of quantizaiton */
float input_scale = input_tensor->scale;
float* kernel_scales = weight_tensor->scale_list;
float output_scale = output_tensor->scale;
const signed char* kernel = weight_tensor->data;
/* pading */
/* Pad each input channel with zeros; skipped (input used in place) when no
 * padding is needed. */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
int8_t* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input_int8;
else
{
input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
for (int g = 0; g < inch; g++)
{
int8_t* pad_in = input_int8 + g * inh * inw;
int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
}
}
/* int32 accumulation: one output channel per loop iteration, accumulating
 * over all input channels with a fully unrolled 3x3 kernel. */
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
int32_t* out0 = output_int32 + p * out_hw;
int8_t* kernel0 = (int8_t* )kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
/* r0..r2 are three consecutive input rows under the 3x3 window */
int8_t* r0 = img0;
int8_t* r1 = img0 + inw_tmp;
int8_t* r2 = img0 + inw_tmp * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += ( int )r0[0] * kernel0[0];
sum0 += ( int )r0[1] * kernel0[1];
sum0 += ( int )r0[2] * kernel0[2];
sum0 += ( int )r1[0] * kernel0[3];
sum0 += ( int )r1[1] * kernel0[4];
sum0 += ( int )r1[2] * kernel0[5];
sum0 += ( int )r2[0] * kernel0[6];
sum0 += ( int )r2[1] * kernel0[7];
sum0 += ( int )r2[2] * kernel0[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
/* stride-1: skip the 2 columns consumed by the 3-wide window */
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
/* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (bias_tensor)
output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
else
output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
}
}
/* process activation relu */
if (param->activation == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
}
}
}
/* process activation relu6 */
/* NOTE(review): any activation > 0 clamps to [0, 6] here -- confirm that is
 * the intended convention for this framework. */
if (param->activation > 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
if (output_fp32[output_off] > 6)
output_fp32[output_off] = 6;
}
}
}
/* quant from fp32 to int8 */
/* round-to-nearest with saturation to [-127, 127] */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[output_off] = (int8_t)data_i32;
}
}
sys_free(output_int32);
sys_free(output_fp32);
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
/* 3x3 stride-2 direct convolution for int8 quantized tensors (NCHW).
 *
 * Pipeline: optional zero-padding of the input, per-output-channel int32
 * accumulation, bias add + dequantization to fp32 (input_scale *
 * per-channel kernel scale), optional ReLU / ReLU6, and re-quantization to
 * int8 with symmetric clamping to [-127, 127].
 *
 * Returns 0 on success, -1 when a scratch buffer cannot be allocated.
 */
static int conv3x3s2_int8_sse(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor,
                              struct ir_tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;
    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    /* int32 accumulator and fp32 staging buffers covering the whole output */
    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
    if (!output_int32 || !output_fp32)
    {
        /* release whichever allocation succeeded before bailing out */
        if (output_int32)
            sys_free(output_int32);
        if (output_fp32)
            sys_free(output_fp32);
        return -1;
    }
    memset(output_int32, 0, out_size * sizeof(int32_t));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = bias_tensor->data;

    /* quantization scales: one for the input, one per output channel */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;
    const signed char* kernel = weight_tensor->data;

    /* pad the input only when padding is actually requested */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = (int8_t*)sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
        if (!input_tmp)
        {
            sys_free(output_int32);
            sys_free(output_fp32);
            return -1;
        }
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* horizontal step from the end of one output row to the start of the
     * next input row pair (stride 2 consumes 2*outw input columns per row) */
    int tailstep = inw_tmp - 2 * outw + inw_tmp;

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        int8_t* kernel0 = (int8_t*)kernel + p * inch * 9;
        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;
            int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
            int8_t* r0 = img0;
            int8_t* r1 = img0 + inw_tmp;
            int8_t* r2 = img0 + inw_tmp * 2;
            for (int i = 0; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    /* 3x3 dot product, widened to int to avoid int8 overflow */
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];
                    *outptr0 += sum0;
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
            kernel0 += 9;
        }
    }

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            if (bias_tensor)
                output_fp32[output_off] = (float)(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_fp32[output_off] = (float)output_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* activation == 0 -> ReLU */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    /* activation > 0 -> ReLU6 (clamp to [0, 6]) */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }

    /* re-quantize fp32 -> int8; symmetric clamp keeps -128 unused */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            int32_t data_i32 = (int32_t)(round(output_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_int32);
    sys_free(output_fp32);
    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);
    return 0;
}
/* Executes the int8 3x3 direct-convolution node, dispatching on stride.
 * Returns the kernel's status, or -1 for unsupported strides. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    int num_thread = exec_graph->num_thread;

    /* re-fetch tensor handles, in case of reshape or dynamic shape */
    struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct ir_tensor* weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct ir_tensor* bias_tensor = NULL;
    if (ir_node->input_num > 2)
        bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
    struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct conv_param* conv_param = (struct conv_param*)ir_node->op.param_mem;

    if (conv_param->stride_h == 1)
        return conv3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
    if (conv_param->stride_h == 2)
        return conv3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);

    TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", conv_param->stride_h);
    set_tengine_errno(EFAULT);
    return -1;
}
/* No per-node state is needed for this implementation; always succeeds. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Nothing was allocated in init_node, so there is nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Reports how well this kernel matches the node: OPS_SCORE_BEST * 2 for
 * ungrouped, undilated, symmetrically padded 3x3 int8 convolutions with
 * stride 1x1 or 2x2; 0 (not applicable) otherwise. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct conv_param* param = (struct conv_param*)exec_node->op.param_mem;
    struct ir_graph* graph = exec_node->graph;
    struct ir_tensor* input_tensor = get_ir_graph_tensor(graph, exec_node->input_tensors[0]);

    /* only int8 tensors are supported */
    if (input_tensor->data_type != TENGINE_DT_INT8)
        return 0;

    /* grouped or dilated convolutions are not handled here */
    if (param->group != 1 || param->dilation_h != 1 || param->dilation_w != 1)
        return 0;

    /* padding must be symmetric on both axes */
    if (param->pad_h0 != param->pad_h1 || param->pad_w0 != param->pad_w1)
        return 0;

    /* only a 3x3 kernel window */
    if (param->kernel_h != 3 || param->kernel_w != 3)
        return 0;

    int sh = param->stride_h;
    int sw = param->stride_w;
    if ((sh == 1 && sw == 1) || (sh == 2 && sw == 2))
        return OPS_SCORE_BEST * 2;

    return 0;
}
/* Operator table for this backend: only run/init/release/score are provided;
 * prerun, reshape and postrun are intentionally left NULL (no preparation or
 * shape-change handling is needed for the direct int8 kernels above). */
static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Registers this node_ops table for OP_CONV; propagates the registry's
 * status to the auto-registration machinery. */
static int reg_conv_dw_ops(void* arg)
{
    return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}
/* Removes this node_ops table from the OP_CONV registry at shutdown. */
static int unreg_conv_dw_ops(void* arg)
{
    unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
    return 0;
}
AUTO_REGISTER_OPS(reg_conv_dw_ops);
AUTO_UNREGISTER_OPS(unreg_conv_dw_ops);
|
depthwise_convolution_3x3_fp16.c | /*
* Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CSI-NN2 version 1.12.x */
#include "csi_c906.h"
/*
(1) Algorithm works as follows:
out_h2: out_h2_w8_loop --> out_h2_w4 --> out_h2_wtail
out_h_tail: out_h1_w8_loop --> out_h1_w4 --> out_h1_wtail
out_h2_w8: out_h2_w4: || out_h1_w8: out_h1_w4:
outptr0[0-7]: outptr1[0-7]: outptr0[0-3]: outptr1[0-3] || outptr0[0-7]: outptr0[0-3]:
k00 * r0[0-7] k00 * r1[0-7] k00 * r0[0-3] k00 * r1[0-3] || k00 * r0[0-7] k00 * r0[0-3]
k01 * r0[1-8] k01 * r1[1-8] k01 * r0[1-4] k01 * r1[1-4] || k01 * r0[1-8] k01 * r0[1-4]
k02 * r0[2-9] k02 * r1[2-9] k02 * r0[2-5] k02 * r1[2-5] || k02 * r0[2-9] k02 * r0[2-5]
k10 * r1[0-7] k10 * r2[0-7] k10 * r1[0-3] k10 * r2[0-3] || k10 * r1[0-7] k10 * r1[0-3]
k11 * r1[1-8] k11 * r2[1-8] k11 * r1[1-4] k11 * r2[1-4] || k11 * r1[1-8] k11 * r1[1-4]
k12 * r1[2-9] k12 * r2[2-9] k12 * r1[2-5] k12 * r2[2-5] || k12 * r1[2-9] k12 * r1[2-5]
k20 * r2[0-7] k20 * r3[0-7] k20 * r2[0-3] k20 * r3[0-3] || k20 * r2[0-7] k20 * r2[0-3]
k21 * r2[1-8] k21 * r3[1-8] k21 * r2[1-4] k21 * r3[1-4] || k21 * r2[1-8] k21 * r2[1-4]
k22 * r2[2-9] k22 * r3[2-9] k22 * r2[2-5] k22 * r3[2-5] || k22 * r2[2-9] k22 * r2[2-5]
(2) register definition:
t0: i_out_h
t1-t2: i_out_w
v0: bias0[0-7], output_data(acc)
v2: bias1[0-7], output_data(acc)
v4,v6,v8: r0 v4:r0[0-7] v6:r0[1-8] v8:r0[2-9]
v10,v12,v14:r3
v16,v18,v20:r1
v22,v24,v26:r2
ft0-ft8: [ k00,k01,k02,k10,k11,k12,k20,k21,k22 ]
(3) // TODO: support channel mult ??
opt padding
*/
/* Depthwise 3x3 stride-1 fp16 convolution using RVV (0.7-style) inline asm.
 *
 * Layout is NCHW with group == in_channel (plain depthwise, no channel
 * multiplier). The input is first copied into a zero-padded scratch buffer,
 * then each channel is convolved independently: two output rows per
 * iteration, each row processed 8 / 4 / 1 columns at a time, with a single
 * tail row handled separately (see the algorithm sketch above).
 *
 * Returns CSINN_TRUE on completion.
 */
int csi_c906_dwconv3x3s1_fp16(struct csi_tensor *input,
                              struct csi_tensor *output,
                              struct csi_tensor *kernel,
                              struct csi_tensor *bias,
                              struct conv2d_params *params)
{
    __fp16 *input_data = (__fp16 *)input->data;
    __fp16 *output_data = (__fp16 *)output->data;
    __fp16 *kernel_data = (__fp16 *)kernel->data;
    __fp16 *bias_data = (__fp16 *)bias->data;
    int32_t batch = input->dim[0];
    int32_t in_c = input->dim[1];   // group = in_channel
    int32_t in_h = input->dim[2];
    int32_t in_w = input->dim[3];
    int32_t out_c = output->dim[1];
    int32_t out_h = output->dim[2];
    int32_t out_w = output->dim[3];
    // NOTE(review): buffer holds __fp16 but is sized with sizeof(float) —
    // a 2x over-allocation; presumably harmless, confirm before shrinking.
    __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * (in_h + params->pad_top + params->pad_down) * (in_w + params->pad_left + params->pad_right) * sizeof(float));
    csi_c906_pad_input_fp16(input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down, in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left);
    in_h = in_h + params->pad_top + params->pad_down;
    in_w = in_w + params->pad_left + params->pad_right;
    // NOTE(review): forced single-threaded; the asm below clobbers fixed
    // registers, so raising num_threads needs verification.
#pragma omp parallel for num_threads(1)
    for (int c = 0; c < in_c; c++) {
        __fp16 *out = output_data + c * out_h * out_w;
        __fp16 *outptr0 = out;
        __fp16 *outptr1 = outptr0 + out_w;     // second output row of the pair
        const __fp16 bias0 = bias_data ? bias_data[c] : 0.0f;
        const __fp16 *img0 = input_padd_buf + c * in_h * in_w;
        const __fp16 *r0 = img0;               // four consecutive input rows
        const __fp16 *r1 = r0 + in_w;
        const __fp16 *r2 = r1 + in_w;
        const __fp16 *r3 = r2 + in_w;
        const __fp16 *kernel0 = kernel_data + c * 9;   // per-channel 3x3 kernel
        asm volatile(
            "vsetvli zero, zero, e16, m1\n\t"
            "flh ft0, 0(%0)\n\t"    // k00
            "flh ft1, 2(%0)\n\t"    // k01
            "flh ft2, 4(%0)\n\t"    // k02
            "flh ft3, 6(%0)\n\t"    // k10
            "flh ft4, 8(%0)\n\t"    // k11
            "flh ft5, 10(%0)\n\t"   // k12
            "flh ft6, 12(%0)\n\t"   // k20
            "flh ft7, 14(%0)\n\t"   // k21
            "flh ft8, 16(%0)\n\t"   // k22

            "srai t0, %7, 1\n\t"    // t0 = out_h >> 1
            "beqz t0, 7f\n\t"

            "1:\n\t"                // out_h_loop2

            "srai t1, %8, 3\n\t"    // t1 = out_w >> 3
            "beqz t1, 3f\n\t"
            "vsetvli zero, zero, e16, m1\n\t"   // set vl = 8

            // pre-load rxx
            "vle.v v4, (%1)\n\t"    // r0[0-7]
            "addi %1, %1, 2\n\t"    // r0++
            "vle.v v6, (%1)\n\t"    // r0[1-8]
            "addi %1, %1, 2\n\t"    // r0++
            "vle.v v8, (%1)\n\t"    // r0[2-9]

            "2:\n\t"                // out_w_loop8

            "vfmv.v.f v0, %20\n\t"  // bias0[0-7]
            "addi %1, %1, 12\n\t"   // r0 += 6
            "vle.v v10, (%4)\n\t"   // r3[0-7]
            "addi %4, %4, 2\n\t"    // r3++
            "vfmv.v.f v2, %20\n\t"  // bias1[0-7]
            "vle.v v12, (%4)\n\t"   // r3[1-8]
            "addi %4, %4, 2\n\t"    // r3++

            "vfmacc.vf v0, ft0, v4\n\t"   // k00 * r0[0-7]
            "vfmacc.vf v2, ft6, v10\n\t"  // k20 * r3[0-7]

            "vle.v v14, (%4)\n\t"   // r3[2-9]
            "addi %4, %4, 12\n\t"   // r3 += 6

            "vfmacc.vf v0, ft1, v6\n\t"   // k01 * r0[1-8]
            "vfmacc.vf v2, ft7, v12\n\t"  // k21 * r3[1-8]

            "vle.v v16, (%2)\n\t"   // r1[0-7]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft2, v8\n\t"   // k02 * r0[2-9]
            "vfmacc.vf v2, ft8, v14\n\t"  // k22 * r3[2-9]

            "vle.v v18, (%2)\n\t"   // r1[1-8]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft3, v16\n\t"  // k10 * r1[0-7]
            "vfmacc.vf v2, ft0, v16\n\t"  // k00 * r1[0-7]

            "vle.v v20, (%2)\n\t"   // r1[2-9]
            "addi %2, %2, 12\n\t"   // r1 += 6

            "vfmacc.vf v0, ft4, v18\n\t"  // k11 * r1[1-8]
            "vfmacc.vf v2, ft1, v18\n\t"  // k01 * r1[1-8]

            "vle.v v22, (%3)\n\t"   // r2[0-7]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft5, v20\n\t"  // k12 * r1[2-9]
            "vfmacc.vf v2, ft2, v20\n\t"  // k02 * r1[2-9]

            "vle.v v24, (%3)\n\t"   // r2[1-8]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft6, v22\n\t"  // k20 * r2[0-7]
            "vfmacc.vf v2, ft3, v22\n\t"  // k10 * r2[0-7]

            "vle.v v26, (%3)\n\t"   // r2[2-9]
            "addi %3, %3, 12\n\t"   // r2 += 6

            "vfmacc.vf v0, ft7, v24\n\t"  // k21 * r2[1-8]
            "vfmacc.vf v2, ft4, v24\n\t"  // k11 * r2[1-8]

            "vle.v v4, (%1)\n\t"    // r0[0-7] load r0 for next loop
            "addi %1, %1, 2\n\t"    // r0++

            "vfmacc.vf v0, ft8, v26\n\t"  // k22 * r2[2-9]

            "vle.v v6, (%1)\n\t"    // r0[1-8]
            "addi %1, %1, 2\n\t"    // r0++

            "vfmacc.vf v2, ft5, v26\n\t"  // k12 * r2[2-9]

            "vle.v v8, (%1)\n\t"    // r0[2-9]

            "vse.v v0, (%5)\n\t"    // store line0 8 elements on outptr0
            "addi %5, %5, 16\n\t"   // outptr0 += 8
            "vse.v v2, (%6)\n\t"    // store line1 8 elements on outptr1
            "addi %6, %6, 16\n\t"   // outptr1 += 8

            "addi t1, t1, -1\n\t"
            "bnez t1, 2b\n\t"

            "addi %1, %1, -4\n\t"   // r0 -= 2 ********* bump r0 to origin addr ************

            "3:\n\t"                // out_w4 : can only be executed once in h2 loop

            "andi t1, %8, 7\n\t"    // t1 = out_w & 7
            "srai t2, t1, 2\n\t"    // t2 = (out_w & 7) >> 2
            "beqz t2, 4f\n\t"

            "li t5, 4\n\t"
            "vsetvli zero, t5, e16, m1\n\t"     // set vl = 8 actually low 4 used
            // "vsetvli zero, zero, e16, m1\n\t"   // set vl = 8 actually low 4 used

            "vle.v v4, (%1)\n\t"    // r0[0-3]  [4-7] unused
            "addi %1, %1, 2\n\t"    // r0++
            "vfmv.v.f v0, %20\n\t"  // bias0[0-3]
            "vle.v v10, (%4)\n\t"   // r3[0-3]
            "addi %4, %4, 2\n\t"    // r3++
            "vfmv.v.f v2, %20\n\t"  // bias1[0-3]
            "vle.v v5, (%1)\n\t"    // r0[1-4]
            "addi %1, %1, 2\n\t"    // r0++
            "vle.v v11, (%4)\n\t"   // r3[1-4]
            "addi %4, %4, 2\n\t"    // r3++

            "vfmacc.vf v0, ft0, v4\n\t"   // k00 * r0[0-3]
            "vfmacc.vf v2, ft6, v10\n\t"  // k20 * r3[0-3]

            "vle.v v6, (%1)\n\t"    // r0[2-5]
            "addi %1, %1, 4\n\t"    // r0 += 2
            "vle.v v12, (%4)\n\t"   // r3[2-5]
            "addi %4, %4, 4\n\t"    // r3 += 2

            "vfmacc.vf v0, ft1, v5\n\t"   // k01 * r0[1-4]
            "vfmacc.vf v2, ft7, v11\n\t"  // k21 * r3[1-4]

            "vle.v v16, (%2)\n\t"   // r1[0-3]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft2, v6\n\t"   // k02 * r0[2-5]
            "vfmacc.vf v2, ft8, v12\n\t"  // k22 * r3[2-5]

            "vle.v v17, (%2)\n\t"   // r1[1-4]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft3, v16\n\t"  // k10 * r1[0-3]
            "vfmacc.vf v2, ft0, v16\n\t"  // k00 * r1[0-3]

            "vle.v v18, (%2)\n\t"   // r1[2-5]
            "addi %2, %2, 4\n\t"    // r1 += 2

            "vfmacc.vf v0, ft4, v17\n\t"  // k11 * r1[1-4]
            "vfmacc.vf v2, ft1, v17\n\t"  // k01 * r1[1-4]

            "vle.v v22, (%3)\n\t"   // r2[0-3]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft5, v18\n\t"  // k12 * r1[2-5]
            "vfmacc.vf v2, ft2, v18\n\t"  // k02 * r1[2-5]]

            "vle.v v23, (%3)\n\t"   // r2[1-4]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft6, v22\n\t"  // k20 * r2[0-3]
            "vfmacc.vf v2, ft3, v22\n\t"  // k10 * r2[0-3]

            "vle.v v24, (%3)\n\t"   // r2[2-5]
            "addi %3, %3, 4\n\t"    // r2 += 2

            "vfmacc.vf v0, ft7, v23\n\t"  // k21 * r2[1-4]
            "vfmacc.vf v2, ft4, v23\n\t"  // k11 * r2[1-4]

            "vfmacc.vf v0, ft8, v24\n\t"  // k22 * r2[2-5]
            "vfmacc.vf v2, ft5, v24\n\t"  // k12 * r2[2-5]

            "vse.v v0, (%5)\n\t"    // store line0 4 elements on outptr0
            "addi %5, %5, 8\n\t"    // outptr0 += 4
            "vse.v v2, (%6)\n\t"    // store line1 4 elements on outptr1
            "addi %6, %6, 8\n\t"    // outptr1 += 4

            "4:\n\t"                // out_w_tail
            "andi t2, t1, 3\n\t"    // t2 = (out_w & 7) & 3
            "beqz t2, 6f\n\t"

            "vfmv.v.f v0, %20\n\t"  // bias0[0-3] / bias1[0-3]

            "li t5, 3\n\t"
            "vsetvli zero, t5, e16, m1\n\t"   // set vl = 3

            "vle.v v5, (%0)\n\t"    // k0
            "addi %0, %0, 6\n\t"
            "vle.v v6, (%0)\n\t"    // k1
            "addi %0, %0, 6\n\t"
            "vle.v v7, (%0)\n\t"    // k2

            "5:\n\t"                // out_w_tail

            "vle.v v4, (%1)\n\t"    // r0
            "addi %1, %1, 2\n\t"    // r0++
            "vle.v v16, (%2)\n\t"   // r1
            "addi %2, %2, 2\n\t"    // r1++
            "vle.v v22, (%3)\n\t"   // r2
            "addi %3, %3, 2\n\t"    // r2++
            "vle.v v10, (%4)\n\t"   // r3
            "addi %4, %4, 2\n\t"    // r3++

            "vfmul.vv v8, v4, v5\n\t"     // r0 * k0
            "vfmacc.vv v8, v16, v6\n\t"   // += r1 * k1
            "vfmacc.vv v8, v22, v7\n\t"   // += r2 * k2

            "vfredsum.vs v11, v8, v0\n\t" // v11[0] = v0[0] + sum(v8[0..2])
            "vfmv.f.s ft9, v11\n\t"       // ft9 = v11[0]

            "vfmul.vv v9, v16, v5\n\t"    // r1 * k0
            "vfmacc.vv v9, v22, v6\n\t"   // += r2 * k1
            "vfmacc.vv v9, v10, v7\n\t"   // += r3 * k2

            "vfredsum.vs v12, v9, v0\n\t" // v12[0] = v0[0] + sum(v9[0..2])
            "vfmv.f.s ft10, v12\n\t"      // ft10 = v12[0]

            "fsh ft9, 0(%5)\n\t"
            "addi %5, %5, 2\n\t"
            "fsh ft10, 0(%6)\n\t"
            "addi %6, %6, 2\n\t"

            "addi t2, t2, -1\n\t"
            "bnez t2, 5b\n\t"

            "addi %0, %0, -12\n\t"  // kernel -= 6 ********* bump kernel_data to origin addr ************

            "6:\n\t"                // out_h_loop2 cnt

            "slli t3, %9, 1\n\t"    // in_w * 2
            "addi t3, t3, 4\n\t"    // in_w * 2 + 4
            "slli t4, %8, 1\n\t"    // out_w * 2

            "add %1, %1, t3\n\t"    // r0 += 2 + in_w
            "add %2, %2, t3\n\t"    // r1 += 2 + in_w
            "add %3, %3, t3\n\t"    // r2 += 2 + in_w
            "add %4, %4, t3\n\t"    // r3 += 2 + in_w

            "add %5, %5, t4\n\t"    // outptr0 += out_w
            "add %6, %6, t4\n\t"    // outptr1 += out_w

            "addi t0, t0, -1\n\t"
            "bnez t0, 1b\n\t"

            "7:\n\t"                // out_h_tail // executes at most once

            "andi t0, %7, 1\n\t"    // t0 = out_h & 1
            "beqz t0, 12f\n\t"

            "srai t1, %8, 3\n\t"    // t1 = out_w >> 3
            "beqz t1, 9f\n\t"

            "vsetvli zero, zero, e16, m1\n\t"   // set vl = 8

            // pre-load rxx
            "vle.v v4, (%1)\n\t"    // r0[0-7]
            "addi %1, %1, 2\n\t"    // r0++
            "vle.v v6, (%1)\n\t"    // r0[1-8]
            "addi %1, %1, 2\n\t"    // r0++
            "vle.v v8, (%1)\n\t"    // r0[2-9]

            "8:\n\t"                // out_w_loop8

            "vfmv.v.f v0, %20\n\t"  // bias0[0-7]
            "addi %1, %1, 12\n\t"   // r0 += 6

            "vfmacc.vf v0, ft0, v4\n\t"   // k00 * r0[0-7]

            "vle.v v16, (%2)\n\t"   // r1[0-7]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft1, v6\n\t"   // k01 * r0[1-8]

            "vle.v v18, (%2)\n\t"   // r1[1-8]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft2, v8\n\t"   // k02 * r0[2-9]

            "vle.v v20, (%2)\n\t"   // r1[2-9]
            "addi %2, %2, 12\n\t"   // r1 += 6

            "vfmacc.vf v0, ft3, v16\n\t"  // k10 * r1[0-7]

            "vle.v v22, (%3)\n\t"   // r2[0-7]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft4, v18\n\t"  // k11 * r1[1-8]

            "vle.v v24, (%3)\n\t"   // r2[1-8]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft5, v20\n\t"  // k12 * r1[2-9]

            "vle.v v26, (%3)\n\t"   // r2[2-9]
            "addi %3, %3, 12\n\t"   // r2 += 6

            "vfmacc.vf v0, ft6, v22\n\t"  // k20 * r2[0-7]

            "vle.v v4, (%1)\n\t"    // r0[0-7]
            "addi %1, %1, 2\n\t"    // r0++

            "vfmacc.vf v0, ft7, v24\n\t"  // k21 * r2[1-8]

            "vle.v v6, (%1)\n\t"    // r0[1-8]
            "addi %1, %1, 2\n\t"    // r0++

            "vfmacc.vf v0, ft8, v26\n\t"  // k22 * r2[2-9]

            "vle.v v8, (%1)\n\t"    // r0[2-9]

            "vse.v v0, (%5)\n\t"    // store line0 8 elements on outptr0
            "addi %5, %5, 16\n\t"   // outptr0 += 8

            "addi t1, t1, -1\n\t"
            "bnez t1, 8b\n\t"

            "addi %1, %1, -4\n\t"   // r0 -= 2 ********* bump r0 to origin addr ************

            "9:\n\t"                // out_w4

            "andi t1, %8, 7\n\t"    // t1 = out_w & 7
            "srai t2, t1, 2\n\t"    // t2 = (out_w & 7) >> 2
            "beqz t2, 10f\n\t"

            "vsetvli zero, zero, e16, m1\n\t"   // set vl = 4

            "vle.v v4, (%1)\n\t"    // r0[0-3]
            "addi %1, %1, 2\n\t"    // r0++
            "vfmv.v.f v0, %20\n\t"  // bias0[0-3]
            "vle.v v5, (%1)\n\t"    // r0[1-4]
            "addi %1, %1, 2\n\t"    // r0++

            "vfmacc.vf v0, ft0, v4\n\t"   // k00 * r0[0-3]

            "vle.v v6, (%1)\n\t"    // r0[2-5]
            "addi %1, %1, 4\n\t"    // r0 += 2

            "vfmacc.vf v0, ft1, v5\n\t"   // k01 * r0[1-4]

            "vle.v v16, (%2)\n\t"   // r1[0-3]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft2, v6\n\t"   // k02 * r0[2-5]

            "vle.v v17, (%2)\n\t"   // r1[1-4]
            "addi %2, %2, 2\n\t"    // r1++

            "vfmacc.vf v0, ft3, v16\n\t"  // k10 * r1[0-3]

            "vle.v v18, (%2)\n\t"   // r1[2-5]
            "addi %2, %2, 4\n\t"    // r1 += 2

            "vfmacc.vf v0, ft4, v17\n\t"  // k11 * r1[1-4]

            "vle.v v22, (%3)\n\t"   // r2[0-3]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft5, v18\n\t"  // k12 * r1[2-5]

            "vle.v v23, (%3)\n\t"   // r2[1-4]
            "addi %3, %3, 2\n\t"    // r2++

            "vfmacc.vf v0, ft6, v22\n\t"  // k20 * r2[0-3]

            "vle.v v24, (%3)\n\t"   // r2[2-5]
            "addi %3, %3, 4\n\t"    // r2 += 2

            "vfmacc.vf v0, ft7, v23\n\t"  // k21 * r2[1-4]

            "vfmacc.vf v0, ft8, v24\n\t"  // k22 * r2[2-5]

            "vse.v v0, (%5)\n\t"    // store line0 4 elements on outptr0
            "addi %5, %5, 16\n\t"   // outptr0 += 4

            "10:\n\t"               // out_w_tail
            "andi t2, t1, 3\n\t"
            "beqz t2, 12f\n\t"

            "vfmv.v.f v0, %20\n\t"  // bias0[0-3]

            "li t5, 3\n\t"
            "vsetvli zero, t5, e16, m1\n\t"   // set vl = 3

            "vle.v v5, (%0)\n\t"    // k0
            "addi %0, %0, 6\n\t"
            "vle.v v6, (%0)\n\t"    // k1
            "addi %0, %0, 6\n\t"
            "vle.v v7, (%0)\n\t"    // k2

            "11:\n\t"               // out_w_tail

            "vle.v v4, (%1)\n\t"    // r0
            "addi %1, %1, 2\n\t"    // r0++
            "vle.v v16, (%2)\n\t"   // r1
            "addi %2, %2, 2\n\t"    // r1++
            "vle.v v22, (%3)\n\t"   // r2
            "addi %3, %3, 2\n\t"    // r2++

            "vfmul.vv v8, v4, v5\n\t"     // r0 * k0
            "vfmacc.vv v8, v16, v6\n\t"   // += r1 * k1
            "vfmacc.vv v8, v22, v7\n\t"   // += r2 * k2

            "vfredsum.vs v11, v8, v0\n\t" // v11[0] = v0[0] + sum(v8[0..2])
            "vfmv.f.s ft9, v11\n\t"       // ft9 = v11[0]

            "fsh ft9, 0(%5)\n\t"
            "addi %5, %5, 2\n\t"

            "addi t2, t2, -1\n\t"
            "bnez t2, 11b\n\t"

            "12:\n\t"
            // updata addr
            "addi %1, %1, 4\n\t"    // r0 += 2
            "addi %2, %2, 4\n\t"    // r1 += 2
            "addi %3, %3, 4\n\t"    // r2 += 2

            :"=r"(kernel0),  // %0
            "=r"(r0),        // %1
            "=r"(r1),        // %2
            "=r"(r2),        // %3
            "=r"(r3),        // %4
            "=r"(outptr0),   // %5
            "=r"(outptr1),   // %6
            "=r"(out_h),     // %7
            "=r"(out_w),     // %8
            "=r"(in_w)       // %9
            :"0"(kernel0),
            "1"(r0),
            "2"(r1),
            "3"(r2),
            "4"(r3),
            "5"(outptr0),
            "6"(outptr1),
            "7"(out_h),
            "8"(out_w),
            "9"(in_w),
            "f"(bias0)       // %20
            :"cc", "memory", "v0", "v2", "v4", "v6", "v8", "v10", "v12", "v14", "v16", "v18", "v20", "v22", "v23", "v24", "v26",
            "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", "ft8", "ft9", "ft10", "t0", "t1", "t2", "t3", "t4", "t5"
        );
    }
    csi_mem_free(input_padd_buf);
    return CSINN_TRUE;
}
/*
(1) Algorithm works as follows:
out_h1_loop: out_w8_loop --> out_w4 --> out_w_tail
(2) register definition:
t0: i_out_h loop cnt
t1-t2: i_out_w loop cnt
t3: load stride 2 for r0-r2
t4: constant 3/4 for setting vl = 3/4
ft0: hold 1 output data
v3: bias
ft1-ft2: [ k00, k01, k02, k10, k11, k12, k20, k21, k22 ]
v10-v18: [ k0, k1, k2 ]
v19-v21: [ acc(kx0*rx), acc(kx1*rx), acc(kx2*rx) ]
(3) //TODO: support channel mult ??
Staggered instructions
*/
/* Depthwise 3x3 stride-2 fp16 convolution using RVV (0.7-style) inline asm.
 *
 * Layout is NCHW with group == in_channel. The input is zero-padded into a
 * scratch buffer, then each channel is processed one output row at a time,
 * 8 / 4 / 1 columns per step; strided segment loads (vlseg2e/vlse) gather
 * the even/odd input columns required by stride 2.
 *
 * Returns CSINN_TRUE on completion.
 */
int csi_c906_dwconv3x3s2_fp16(struct csi_tensor *input,
                              struct csi_tensor *output,
                              struct csi_tensor *kernel,
                              struct csi_tensor *bias,
                              struct conv2d_params *params)
{
    __fp16 *input_data = (__fp16 *)input->data;
    __fp16 *output_data = (__fp16 *)output->data;
    __fp16 *kernel_data = (__fp16 *)kernel->data;
    __fp16 *bias_data = (__fp16 *)bias->data;
    int32_t batch = input->dim[0];
    int32_t in_c = input->dim[1];   // group = in_channel
    int32_t in_h = input->dim[2];
    int32_t in_w = input->dim[3];
    int32_t out_c = output->dim[1];
    int32_t out_h = output->dim[2];
    int32_t out_w = output->dim[3];
    // NOTE(review): buffer holds __fp16 but is sized with sizeof(float) —
    // a 2x over-allocation; presumably harmless, confirm before shrinking.
    __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * (in_h + params->pad_top + params->pad_down) * (in_w + params->pad_left + params->pad_right) * sizeof(float));
    csi_c906_pad_input_fp16(input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down, in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left);
    in_h = in_h + params->pad_top + params->pad_down;
    in_w = in_w + params->pad_left + params->pad_right;
    // elements to skip at the end of each output row to reach the next input
    // row pair (stride 2 consumes 2*out_w columns per row)
    int tailstep = in_w - 2 * out_w + in_w;
    // NOTE(review): forced single-threaded; see clobber list before raising.
#pragma omp parallel for num_threads(1)
    for (int c = 0; c < in_c; c++) {
        __fp16 *out = output_data + c * out_h * out_w;
        __fp16 *outptr0 = out;
        const __fp16 bias0 = bias_data ? bias_data[c] : 0.0f;
        const __fp16 *img0 = input_padd_buf + c * in_h * in_w;
        const __fp16 *r0 = img0;     // three consecutive input rows
        const __fp16 *r1 = r0 + in_w;
        const __fp16 *r2 = r1 + in_w;
        const __fp16 *kernel0 = kernel_data + c * 9;   // per-channel 3x3 kernel
        asm volatile(
            "vsetvli zero, zero, e16, m1\n\t"
            "li t3, 4\n\t"          // load stride for r_x
            "flh ft1, (%0)\n\t"
            "flh ft2, 2(%0)\n\t"
            "flh ft3, 4(%0)\n\t"
            "flh ft4, 6(%0)\n\t"
            "flh ft5, 8(%0)\n\t"
            "flh ft6, 10(%0)\n\t"
            "flh ft7, 12(%0)\n\t"
            "flh ft8, 14(%0)\n\t"
            "flh ft9, 16(%0)\n\t"   // load k00 - k22

            "vle.v v10, (%0)\n\t"   // k0
            "addi %0, %0, 6\n\t"
            "vle.v v11, (%0)\n\t"   // k1
            "addi %0, %0, 6\n\t"
            "vle.v v12, (%0)\n\t"   // k2

            "vfmv.v.f v0, %16\n\t"  // bias0

            "mv t0, %5\n\t"         // i_out_h = out_h

            "1:\n\t"                // out_h

            "srai t1, %6, 3\n\t"    // t1 = out_w >> 3
            "beqz t1, 3f\n\t"
            "vsetvli zero, zero, e16, m1\n\t"

            // pre-load rxx
            "vlseg2e.v v4, (%1)\n\t"    // v4[0..7] = r0[0,2,4,6,8,10,12,14]   v5[0..7] = r0[1,3,5,7,9,11,13,15]
            "addi %1, %1, 4\n\t"        // r0 += 2
            "vlse.v v1, (%1), t3\n\t"   // r0[2,4,6,8,10,12,14,16]
            "addi %1, %1, 28\n\t"

            "2:\n\t"                // out_w_loop8

            "vfmv.v.f v0, %16\n\t"  // bias0

            "vlseg2e.v v6, (%2)\n\t"    // v6[0..7] = r1[0,2,4,6,8,10,12,14]   v7[0..7] = r1[1,3,5,7,9,11,13,15]
            "addi %2, %2, 4\n\t"

            "vfmul.vf v20, v4, ft1\n\t" // = k00 * r0[0,2,4,6,8,10,12,14]
            "vfmul.vf v21, v5, ft2\n\t" // = k01 * r0[1,3,5,7,9,11,13,15]

            "vlse.v v2, (%2), t3\n\t"   // r1[2,4,6,8,10,12,14,16]
            "addi %2, %2, 28\n\t"

            "vfmacc.vf v0, ft3, v1\n\t" // += k02 * r0[2,4,6,8,10,12,14,16]

            "vlseg2e.v v8, (%3)\n\t"    // v8[0..7] = r2[0,2,4,6,8,10,12,14]   v9[0..7] = r2[1,3,5,7,9,11,13,15]
            "addi %3, %3, 4\n\t"

            "vfmacc.vf v20, ft4, v6\n\t"    // += k10 * r1[0,2,4,6,8,10,12,14]
            "vfmacc.vf v21, ft5, v7\n\t"    // += k11 * r1[1,3,5,7,9,11,13,15]

            "vlse.v v3, (%3), t3\n\t"
            "addi %3, %3, 28\n\t"

            "vfmacc.vf v0, ft6, v2\n\t"     // += k12 * r1[2,4,6,8,10,12,14,16]

            "vlseg2e.v v4, (%1)\n\t"    // v4[0..3] = r0[0,2,4,6,8,10,12,14]   v5[0..3] = r0[1,3,5,7,9,11,13,15]
            "addi %1, %1, 4\n\t"        // r0 += 2

            "vfmacc.vf v20, ft7, v8\n\t"    // += k20 * r2[0,2,4,6,8,10,12,14]
            "vfmacc.vf v21, ft8, v9\n\t"    // += k21 * r2[1,3,5,7,9,11,13,15]

            "vlse.v v1, (%1), t3\n\t"   // r0[2,4,6,8,10,12,14,16]
            "addi %1, %1, 28\n\t"

            "vfmacc.vf v0, ft9, v3\n\t"     // += k22 * r2[2,4,6,8,10,12,14,16]

            "vfadd.vv v2, v20, v21\n\t"
            "vfadd.vv v0, v0, v2\n\t"

            "vse.v v0, (%4)\n\t"
            "addi %4, %4, 16\n\t"   // outptr += 8

            "addi t1, t1, -1\n\t"
            "bnez t1, 2b\n\t"

            "addi %1, %1, -32\n\t"  // r0 -= 16 ********* bump r0 to origin addr ************

            "3:\n\t"                // out_w4

            "andi t1, %6, 7\n\t"    // t1 = out_w & 7
            "srai t2, t1, 2\n\t"    // t2 = (out_w & 7) >> 2
            "beqz t2, 4f\n\t"

            "li t4, 4\n\t"
            "vsetvli zero, t4, e16, m1\n\t"   // set vl = 4

            "vfmv.v.f v0, %16\n\t"  // bias0

            "vlseg2e.v v4, (%1)\n\t"    // v4[0..3] = r0[0,2,4,6]   v5[0..3] = r0[1,3,5,7]
            "addi %1, %1, 4\n\t"        // r0 += 2
            "vlse.v v1, (%1), t3\n\t"   // r0[2,4,6,8]
            "addi %1, %1, 12\n\t"

            "vfmul.vf v20, v4, ft1\n\t" // = k00 * r0[0,2,4,6]
            "vfmul.vf v21, v5, ft2\n\t" // = k01 * r0[1,3,5,7]

            "vlseg2e.v v6, (%2)\n\t"    // v6[0..3] = r1[0,2,4,6]   v7[0..3] = r1[1,3,5,7]
            "addi %2, %2, 4\n\t"        // r1 += 2

            "vfmacc.vf v0, ft3, v1\n\t" // += k02 * r0[2,4,6,8]

            "vlse.v v2, (%2), t3\n\t"   // r1[2,4,6,8]
            "addi %2, %2, 12\n\t"

            "vfmacc.vf v20, ft4, v6\n\t"    // += k10 * r1[0,2,4,6]
            "vfmacc.vf v21, ft5, v7\n\t"    // += k11 * r1[1,3,5,7]

            "vlseg2e.v v8, (%3)\n\t"    // v8[0..3] = r2[0,2,4,6]   v9[0..3] = r2[1,3,5,7]
            "addi %3, %3, 4\n\t"

            "vfmacc.vf v0, ft6, v2\n\t"     // += k12 * r1[2,4,6,8]

            "vlse.v v3, (%3), t3\n\t"   // r2[2,4,6,8]
            "addi %3, %3, 12\n\t"

            "vfmacc.vf v20, ft7, v8\n\t"    // += k20 * r2[0,2,4,6]
            "vfmacc.vf v21, ft8, v9\n\t"    // += k21 * r2[1,3,5,7]

            "vfmacc.vf v0, ft9, v3\n\t"     // += k22 * r2[2,4,6,8]

            "vfadd.vv v2, v20, v21\n\t"
            "vfadd.vv v0, v0, v2\n\t"

            "vse.v v0, (%4)\n\t"
            "addi %4, %4, 8\n\t"    // outptr += 4

            "4:\n\t"                // out_w_tail
            "andi t2, t1, 3\n\t"    // t2 = out_w & 3
            "beqz t2, 6f\n\t"

            "li t4, 3\n\t"
            "vsetvli zero, t4, e16, m1\n\t"   // set vl = 3

            "vfmv.v.f v0, %16\n\t"  // bias0

            "5:\n\t"                // out_w_tail

            "vle.v v4, (%1)\n\t"    // r0
            "addi %1, %1, 4\n\t"
            "vle.v v6, (%2)\n\t"    // r1
            "addi %2, %2, 4\n\t"
            "vle.v v8, (%3)\n\t"    // r2
            "addi %3, %3, 4\n\t"

            "vfmul.vv v20, v4, v10\n\t"   // r0 * k0
            "vfmacc.vv v20, v6, v11\n\t"  // += r1 * k1
            "vfmacc.vv v20, v8, v12\n\t"  // += r2 * k2

            "vfredsum.vs v21, v20, v0\n\t"    // v21[0] = v0[0](bias) + sum(v20[0..2])
            "vfmv.f.s ft0, v21\n\t"           // ft0 = v21[0]

            "fsh ft0, 0(%4)\n\t"
            "addi %4, %4, 2\n\t"    // bump output_data pointer

            "addi t2, t2, -1\n\t"
            "bnez t2, 5b\n\t"

            "6:\n\t"
            "slli t2, %7, 1\n\t"    // t2 = tailstep * 2
            "add %1, %1, t2\n\t"
            "add %2, %2, t2\n\t"
            "add %3, %3, t2\n\t"    // r0/r1/r2 += tailstep

            "addi t0, t0, -1\n\t"
            "bnez t0, 1b\n\t"

            :"=r"(kernel0),  // %0
            "=r"(r0),        // %1
            "=r"(r1),        // %2
            "=r"(r2),        // %3
            "=r"(outptr0),   // %4
            "=r"(out_h),     // %5
            "=r"(out_w),     // %6
            "=r"(tailstep)   // %7
            :"0"(kernel0),
            "1"(r0),
            "2"(r1),
            "3"(r2),
            "4"(outptr0),
            "5"(out_h),
            "6"(out_w),
            "7"(tailstep),
            "f"(bias0)       // %16
            // NOTE(review): clobbers list "ft11" but not "ft10"; the asm only
            // uses ft0-ft9 — confirm ft11 entry is intentional.
            :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v20", "v21",
            "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", "ft8", "ft9", "ft11", "t0", "t1", "t2", "t3", "t4"
        );
    }
    csi_mem_free(input_padd_buf);
    return CSINN_TRUE;
}
|
rt_dormqr.c | #include "runtime.h"
/* Runtime dispatcher for the dormqr tile kernel (apply Q or Q^T from a QR
 * factorization to tile C).
 *
 * Under the QUARK runtime the task is queued through QUARK_CORE_dormqr;
 * under OmpSs an ib*nb workspace is allocated and CORE_dormqr is submitted
 * as an OmpSs task with copy_deps on A, T (in) and C (inout). */
void RT_CORE_dormqr(Quark *quark, Quark_Task_Flags *task_flags,
                    PLASMA_enum side, PLASMA_enum trans,
                    int m, int n, int k, int ib, int nb,
                    const double *A, int lda,
                    const double *T, int ldt,
                    double *C, int ldc)
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if (plasma->runtime == PLASMA_QUARK) {
        QUARK_CORE_dormqr(
            quark, task_flags,
            side, trans,
            m, n, k, ib, nb,
            A, lda,
            T, ldt,
            C, ldc);
    }
    else if (plasma->runtime == PLASMA_OMPSS) {
        // NOTE(review): WORK is never freed — it cannot be freed here because
        // the task below uses it asynchronously, so this leaks one ib*nb
        // buffer per call; confirm whether a taskwait + free (or a
        // runtime-owned workspace) is intended. Return value of malloc is
        // also unchecked.
        double *WORK = malloc(ib*nb*sizeof(double));
        //#pragma omp register ([ib*nb]WORK)
        //#pragma omp task concurrent([m*nb]A) in([ib*nb]T) inout([m*n]C) label(dormqr)
        //printf("\n\n DORMQR BEFORE m %d n %d k %d ib %d lda %d ldt %d ldc %d \n", m, n, k, ib, lda, ldt, ldc);
        /*
        */
        #pragma omp target device (smp) copy_deps
        #pragma omp task in([lda*nb]A, [ldt*nb]T) inout([ldc*nb]C) label(dormqr)
        CORE_dormqr(side, trans, m, n, k, ib, A, lda, T, ldt, C, ldc, WORK, nb);
        //printf("\n\n DORMQR AFTER\n");
    }
}
|
spmv.h | #pragma once
#include <cstring>
#include "dmlc/data.h"
#include "dmlc/omp.h"
namespace dmlc {
/**
* \brief a range between [begin, end)
*/
struct Range {
Range(size_t _begin, size_t _end) : begin(_begin), end(_end) { }
Range() : Range(0, 0) { }
~Range() { }
/**
* \brief evenly divide this range into npart segments, and return the idx-th
* one
*/
inline Range Segment(size_t idx, size_t nparts) const {
CHECK_GE(end, begin);
CHECK_GT(nparts, (size_t)0);
CHECK_LT(idx, nparts);
double itv = static_cast<double>(end - begin) /
static_cast<double>(nparts);
size_t _begin = static_cast<size_t>(begin + itv * idx);
size_t _end = (idx == nparts - 1) ?
end : static_cast<size_t>(begin + itv * (idx+1));
return Range(_begin, _end);
}
/**
* \brief Return true if i contains in this range
*/
inline bool Has(size_t i) const {
return (begin <= i && i < end);
}
size_t begin;
size_t end;
};
/**
* \brief multi-thread sparse matrix vector multiplication
*/
class SpMV {
 public:
  // default number of worker threads when the caller does not specify one
  static const int kDefaultNT = 2;
  using SpMat = RowBlock<unsigned>;
  /** \brief y = D * x (vector overload; y must be pre-sized to D.size) */
  template<typename V>
  static void Times(const SpMat& D, const std::vector<V>& x,
                    std::vector<V>* y, int nthreads = kDefaultNT) {
    CHECK_NOTNULL(y);
    CHECK_EQ(y->size(), D.size);
    Times<V>(D, x.data(), y->data(), nthreads);
  }
  /** \brief y = D^T * x (vector overload; y->size() fixes the output dim) */
  template<typename V>
  static void TransTimes(const SpMat& D, const std::vector<V>& x,
                         std::vector<V>* y, int nthreads = kDefaultNT) {
    CHECK_EQ(x.size(), D.size);
    CHECK_NOTNULL(y);
    TransTimes<V>(D, x.data(), y->data(), y->size(), nthreads);
  }
  /** \brief y = D * x
   *
   * Rows are partitioned evenly across threads; each thread writes only
   * its own slice of y, so no synchronization is needed.
   */
  template<typename V>
  static void Times(const SpMat& D, const V* const x, V* y, int nthreads = kDefaultNT) {
#pragma omp parallel num_threads(nthreads)
    {
      Range rg = Range(0, D.size).Segment(
          omp_get_thread_num(), omp_get_num_threads());

      for (size_t i = rg.begin; i < rg.end; ++i) {
        if (D.offset[i] == D.offset[i+1]) continue;  // skip empty rows
        V y_i = 0;
        if (D.value) {  // weighted matrix
          for (size_t j = D.offset[i]; j < D.offset[i+1]; ++j)
            y_i += x[D.index[j]] * D.value[j];
        } else {        // binary matrix: values are implicitly 1
          for (size_t j = D.offset[i]; j < D.offset[i+1]; ++j)
            y_i += x[D.index[j]];
        }
        y[i] = y_i;
      }
    }
  }
  /** \brief y = D^T * x
   *
   * Race-free by construction: the OUTPUT range [0, y_size) is partitioned
   * across threads, every thread scans all rows of D, and the rg.Has(k)
   * guard ensures each y[k] is written by exactly one thread. Each thread
   * also zeroes only its own slice of y before accumulating.
   */
  template<typename V>
  static void TransTimes(const SpMat& D, const V* const x, V* y, size_t y_size,
                         int nthreads = kDefaultNT) {
#pragma omp parallel num_threads(nthreads)
    {
      Range rg = Range(0, y_size).Segment(
          omp_get_thread_num(), omp_get_num_threads());
      std::memset(y + rg.begin, 0, sizeof(V) * (rg.end - rg.begin));

      for (size_t i = 0; i < D.size; ++i) {
        if (D.offset[i] == D.offset[i+1]) continue;  // skip empty rows
        V x_i = x[i];
        if (D.value) {  // weighted matrix
          for (size_t j = D.offset[i]; j < D.offset[i+1]; ++j) {
            unsigned k = D.index[j];
            if (rg.Has(k)) y[k] += x_i * D.value[j];
          }
        } else {        // binary matrix: values are implicitly 1
          for (size_t j = D.offset[i]; j < D.offset[i+1]; ++j) {
            unsigned k = D.index[j];
            if (rg.Has(k)) y[k] += x_i;
          }
        }
      }
    }
  }
};
} // namespace dmlc
|
SimpleLsh.h | /*
* File: LSH_all.h
* Author: chteflio
*
* Created on March 26, 2015, 9:16 AM
*/
#ifndef LSH_ALL_H
#define LSH_ALL_H
namespace mips {
// Top-k maximum-inner-product retrieval via the Simple-LSH reduction:
// both query and probe vectors get one extra leading coordinate so inner
// products can be ranked by an LSH index over cosine sketches.
// NOTE(review): "Simple-LSH" pattern inferred from the transforms below --
// confirm against the original paper/documentation.
class SimpleLsh : public Mip {
// probeBucketK covers the first k probe rows (used to seed each query heap
// in runTopK); probeBucket covers rows [k, rowNum) and carries the LSH index.
ProbeBucket probeBucket, probeBucketK;
// per-thread retrieval scratch state; sized to args.threads in the ctor
std::vector<RetrievalArguments> retrArg;
LempArguments args;
// Builds queryMatrix from leftMatrix: each query q becomes [0; q/||q||],
// i.e. one extra leading zero coordinate and unit length.
inline void transformQueryMatrix(VectorMatrix& leftMatrix, VectorMatrix& queryMatrix) {
// transform queryMatrix (transform ||q|| to 1 and q = [0;q])
queryMatrix.rowNum = leftMatrix.rowNum;
queryMatrix.colNum = leftMatrix.colNum + 1;
queryMatrix.initializeBasics(queryMatrix.colNum, queryMatrix.rowNum, false);
#pragma omp parallel for schedule(static,1000)
for (row_type i = 0; i < queryMatrix.rowNum; i++) {
double* dQuery = leftMatrix.getMatrixRowPtr(i);
double* dTmp = queryMatrix.getMatrixRowPtr(i);
// division is expensive! multiply with inverse instead
double invLen = 1 / calculateLength(leftMatrix.getMatrixRowPtr(i), leftMatrix.colNum);
// all but last coordinates
scaleAndCopy(dTmp + 1, dQuery, invLen, leftMatrix.colNum);
// last coordinate
dTmp[0] = 0;
queryMatrix.setLengthInData(i, 1); // ||q|| = 1
}
}
// Builds probeMatrix (a member, presumably inherited from Mip -- verify)
// from rightMatrix: each probe p is scaled by 1/maxLen so its norm is <= 1,
// then prefixed with sqrt(1 - ||p'||^2) so the result has unit length.
inline void transformProbeMatrix(VectorMatrix& rightMatrix) {
// transform probeMatrix (transform ||p|| to less than 1 and p = [sqrt(1- ||p|| * ||p||);p])
// we need the longest vector from probeMatrix
double maxLen = 0;
for (row_type i = 0; i < rightMatrix.rowNum; i++) {
double len = calculateLength(rightMatrix.getMatrixRowPtr(i), rightMatrix.colNum);
if (len > maxLen) {
maxLen = len;
}
}
double invMaxLen = 1 / maxLen;
probeMatrix.rowNum = rightMatrix.rowNum;
probeMatrix.colNum = rightMatrix.colNum + 1;
probeMatrix.initializeBasics(probeMatrix.colNum, probeMatrix.rowNum, false);
#pragma omp parallel for schedule(static,1000)
for (row_type i = 0; i < probeMatrix.rowNum; i++) {
double* dProbe = rightMatrix.getMatrixRowPtr(i);
double* dTmp = probeMatrix.getMatrixRowPtr(i);
// all but last coordinates
scaleAndCopy(dTmp + 1, dProbe, invMaxLen, rightMatrix.colNum); // multiply with inverse
// last coordinate
// NOTE(review): length is computed from dTmp (slot 0 not yet written)
// rather than dTmp + 1 -- verify against getMatrixRowPtr/calculateLength
// conventions that this reads the intended colNum scaled values.
double len = calculateLength(dTmp, rightMatrix.colNum); // use new values but without the new coordinate
// clamp before sqrt to guard against tiny negative values from rounding
dTmp[0] = ((1 - len * len) < 0) ? 0 : sqrt(1 - len * len);
probeMatrix.setLengthInData(i, 1); // set to 1 by the transformation
}
}
// Writes the algorithm banner to stdout and the run log.
inline void printAlgoName(const VectorMatrix& leftMatrix) {
logging << "SIMPLE_LSH" << "\t" << args.threads << "\t";
std::cout << "[ALGORITHM] SIMPLE_LSH with " << args.threads << " thread(s)" << std::endl;
logging << "P(" << probeMatrix.rowNum << "x" << (0 + probeMatrix.colNum) << ")\t";
logging << "Q^T(" << leftMatrix.rowNum << "x" << (0 + leftMatrix.colNum) << ")\t";
}
// Prepares per-thread query matrices and retrieval scratch state:
// caps the thread count by the number of queries, optionally applies the
// query transform, and splits the queries across threads.
inline void initializeInternal(std::vector<VectorMatrix>& queryMatrices, VectorMatrix& leftMatrix) {
std::cout << "[RETRIEVAL] QueryMatrix contains " << leftMatrix.rowNum << " vectors with dimensionality " << (0 + leftMatrix.colNum) << std::endl;
row_type myNumThreads = args.threads;
if (leftMatrix.rowNum < args.threads) {
// fewer queries than threads: shrink the team so no thread is idle
myNumThreads = leftMatrix.rowNum;
std::cout << "[WARNING] Query matrix contains too few elements. Suboptimal running with " << myNumThreads << " thread(s)" << std::endl;
}
omp_set_num_threads(myNumThreads);
queryMatrices.resize(myNumThreads);
timer.start();
if (!isTransformed) {
std::cout << "[RETRIEVAL] QueryMatrix will be transformed" << std::endl;
VectorMatrix queryMatrix;
SimpleLsh::transformQueryMatrix(leftMatrix, queryMatrix);
splitMatrices(queryMatrix, queryMatrices);
} else {
splitMatrices(leftMatrix, queryMatrices);
}
timer.stop();
dataPreprocessingTimeLeft += timer.elapsedTime().nanos();
for (row_type i = 0; i < myNumThreads; i++) {
retrArg[i].initializeBasics(queryMatrices[i], probeMatrix, LEMP_LSH, args.theta, args.k, myNumThreads, 1, 0, 0, 0, false, false);
retrArg[i].init(probeMatrix.rowNum);
retrArg[i].clear();
}
}
// Collects candidate probe rows for one query from the LSH bins and
// verifies them against the query's current top-k heap.
inline void processIndexesTopk(double * query, row_type queryId,
LshIndex* index, LshIndex* queryIndex, ProbeBucket& probeBucket, RetrievalArguments* arg) {
row_type numCandidatesToVerify = 0;
index->lshBins->getCandidates(queryIndex->cosSketches->sketches, queryId, arg->candidatesToVerify, numCandidatesToVerify,
arg->done, LSH_SIGNATURES, probeBucket.startPos);
verifyCandidatesTopK_noLengthTest(query, numCandidatesToVerify, arg);
}
public:
// when true, input matrices are assumed already transformed and the
// query/probe transforms above are skipped
bool isTransformed;
// Copies input arguments, opens the run log (append mode; a warning is
// printed if it cannot be opened), and sizes the per-thread scratch state.
inline SimpleLsh(InputArguments& input, bool isTransformed) : isTransformed(isTransformed) {
args.copyInputArguments(input);
// now do the logging
logging.open(args.logFile.c_str(), std::ios_base::app);
if (!logging.is_open()) {
std::cout << "[WARNING] No log will be created!" << std::endl;
} else {
std::cout << "[INFO] Logging in " << args.logFile << std::endl;
}
omp_set_num_threads(args.threads);
retrArg.resize(args.threads);
}
inline ~SimpleLsh() {
// close the run log opened in the constructor
logging.close();
}
// One-time setup of the probe side: optionally transforms rightMatrix into
// probeMatrix, then initializes the two probe buckets and the LSH index.
void initialize(VectorMatrix& rightMatrix) {
std::cout << "[INIT] ProbeMatrix contains " << rightMatrix.rowNum << " vectors with dimensionality " << (0 + rightMatrix.colNum) << std::endl;
logging << "P(" << rightMatrix.rowNum << "x" << (0 + rightMatrix.colNum) << ")\t";
if (!isTransformed) {
std::cout << "[INIT] ProbeMatrix will be transformed" << std::endl;
timer.start();
transformProbeMatrix(rightMatrix);
timer.stop();
dataPreprocessingTimeRight += timer.elapsedTime().nanos();
} else {
probeMatrix = rightMatrix;
}
probeBucketK.init(probeMatrix, 0, args.k, args);
probeBucket.init(probeMatrix, args.k, probeMatrix.rowNum, args); // initialize
if (probeBucket.ptrIndexes[LSH] == 0)
probeBucket.ptrIndexes[LSH] = new LshIndex;
static_cast<LshIndex*> (probeBucket.ptrIndexes[LSH])->initializeLists(probeMatrix, true, args.k, probeMatrix.rowNum);
}
// Runs top-k retrieval for all queries in leftMatrix and appends the
// results to `results`. Each thread seeds its heap with the first k probe
// rows, then refines it using LSH candidates.
inline void runTopK(VectorMatrix& leftMatrix, Results& results) {
printAlgoName(leftMatrix);
std::vector<VectorMatrix> queryMatrices;
initializeInternal(queryMatrices, leftMatrix);
results.resultsVector.resize(args.threads);
LshIndex* index = static_cast<LshIndex*> (probeBucket.getIndex(LSH));
timer.start();
// grow the probe-side sketches if fewer than LSH_SIGNATURES were built
if (LSH_SIGNATURES > index->initializedSketchesForIndex) {
index->checkAndReallocateAll(retrArg[0].probeMatrix, true, probeBucket.startPos, probeBucket.endPos, LSH_SIGNATURES,
retrArg[0].sums, retrArg[0].countsOfBlockValues, retrArg[0].sketches, false);
}
timer.stop();
dataPreprocessingTimeRight += timer.elapsedTime().nanos();
std::cout << "[RETRIEVAL] Retrieval (k = " << args.k << ") starts ..." << std::endl;
logging << "k(" << args.k << ")\t";
timer.start();
comp_type comparisons = 0;
#pragma omp parallel reduction(+ : comparisons)
{
row_type tid = omp_get_thread_num();
LshIndex queryIndex; // separate for each thread
queryIndex.initializeLists(queryMatrices[tid], false, 0, queryMatrices[tid].rowNum);
queryIndex.checkAndReallocateAll(retrArg[tid].queryMatrix, false, 0, queryMatrices[tid].rowNum, LSH_SIGNATURES,
retrArg[tid].sums, retrArg[tid].countsOfBlockValues, retrArg[tid].sketches, false);
retrArg[tid].allocTopkResults();
for (row_type i = 0; i < queryMatrices[tid].rowNum; i++) {
double* query = queryMatrices[tid].getMatrixRowPtr(i);
retrArg[tid].queryId = i;
// seed the heap with exact inner products against the first k probes
for (row_type j = 0; j < args.k; j++) {
double ip = queryMatrices[tid].innerProduct(i, probeMatrix.getMatrixRowPtr(j));
retrArg[tid].comparisons++;
retrArg[tid].heap[j] = QueueElement(ip, j);
}
std::make_heap(retrArg[tid].heap.begin(), retrArg[tid].heap.end(), std::greater<QueueElement>()); //make the heap;
processIndexesTopk(query, i, index, &queryIndex, probeBucket, &retrArg[tid]);
retrArg[tid].writeHeapToTopk(i);
}
retrArg[tid].extendIncompleteResultItems();
results.moveAppend(retrArg[tid].results, tid);
comparisons += retrArg[tid].comparisons;
}
timer.stop();
retrievalTime += timer.elapsedTime().nanos();
totalComparisons += comparisons;
std::cout << "[RETRIEVAL] ... and is finished with " << results.getResultSize() << " results" << std::endl;
logging << results.getResultSize() << "\t";
outputStats();
}
};
}
#endif /* LSH_ALL_H */
|
HETsearch.c | #include "HETsearch.h"
// Heterogeneous search with: (1) SSE instructions and Score Profile in CPU (2) KNC instructions and Adaptive Profile in MIC
void het_search_sse_sp_knc_ap (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned int query_sequences_count, unsigned long int Q,
unsigned int * query_disp, unsigned long int vect_sequences_db_count, char ** chunk_b, unsigned int chunk_count, unsigned int * chunk_vect_sequences_db_count,
unsigned short int ** chunk_n, unsigned int ** chunk_b_disp, unsigned long int * chunk_vD, char * submat, int open_gap, int extend_gap,
int cpu_threads, int cpu_block_size, int num_mics, int mic_threads, int * scores, double * workTime, unsigned short int query_length_threshold) {
unsigned long int offload_max_vD=0, * chunk_accum_vect_sequences_db_count;
long int i=0;
double tick;
unsigned short int * m, *n, sequences_db_max_length, query_sequences_max_length;
unsigned int * a_disp, * b_disp = NULL, offload_max_vect_sequences_db_count=0, qp_count, sp_count;
char *a, * b, * queryProfiles;
a = query_sequences; b = chunk_b[0];
m = query_sequences_lengths; n = chunk_n[0];
a_disp = query_disp; b_disp = chunk_b_disp[0];
query_sequences_max_length = query_sequences_lengths[query_sequences_count-1];
sequences_db_max_length = chunk_n[chunk_count-1][chunk_vect_sequences_db_count[chunk_count-1]-1];
// calculate maximum chunk size
for (i=0; i<chunk_count ; i++)
offload_max_vD = (offload_max_vD > chunk_vD[i] ? offload_max_vD : chunk_vD[i]);
// calculate maximum chunk sequences count
for (i=0; i<chunk_count ; i++)
offload_max_vect_sequences_db_count = (offload_max_vect_sequences_db_count > chunk_vect_sequences_db_count[i] ? offload_max_vect_sequences_db_count : chunk_vect_sequences_db_count[i]);
// calculate number of query sequences that are processed with query and score profile
i = 0;
while ((i < query_sequences_count) && (query_sequences_lengths[i] < query_length_threshold))
i++;
qp_count = i;
sp_count = query_sequences_count-qp_count;
// build query profile's
queryProfiles = (char *)_mm_malloc(Q*BLOSUM_COLS*sizeof(char), 64);
for (i=0; i<Q ; i++)
memcpy(queryProfiles+i*BLOSUM_COLS,submat+a[i]*BLOSUM_COLS,BLOSUM_COLS*sizeof(char));
// Allocate memory for CPU buffers
chunk_accum_vect_sequences_db_count = (unsigned long int *)_mm_malloc(chunk_count*sizeof(unsigned long int), 32);
chunk_accum_vect_sequences_db_count[0] = 0;
for (i=1; i<chunk_count ; i++)
chunk_accum_vect_sequences_db_count[i] = chunk_accum_vect_sequences_db_count[i-1] + chunk_vect_sequences_db_count[i-1];
// allow nested parallelism
omp_set_nested(1);
tick = dwalltime();
#pragma omp parallel default(none) shared(queryProfiles, submat, a,m,a_disp,query_sequences_count,b,n,b_disp,vect_sequences_db_count,scores, cpu_block_size, num_mics, mic_threads, open_gap, extend_gap, cpu_threads, qp_count,sp_count, chunk_b, chunk_n, chunk_b_disp, chunk_vD, query_sequences_max_length, sequences_db_max_length, Q, chunk_vect_sequences_db_count, chunk_accum_vect_sequences_db_count, chunk_count, offload_max_vD, offload_max_vect_sequences_db_count, query_length_threshold) num_threads(num_mics+1)
{
// data for MIC thread
__declspec(align(64)) __m512i *mic_row_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *mic_maxCol_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *mic_maxRow_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *mic_lastCol_ptrs[MIC_MAX_NUM_THREADS]={NULL};
__declspec(align(64)) char * mic_scoreProfile_ptrs[MIC_MAX_NUM_THREADS]={NULL};
unsigned long int offload_vD, scores_offset;
unsigned int mic_chunks=0, offload_vect_sequences_db_count;
int * mic_scores;
// data for CPU thread
__m128i *cpu_row1_ptrs[cpu_threads], *cpu_row2_ptrs[cpu_threads], *cpu_row3_ptrs[cpu_threads];
__m128i *cpu_maxCol_ptrs[cpu_threads], *cpu_maxRow_ptrs[cpu_threads], *cpu_lastCol_ptrs[cpu_threads];
char * cpu_scoreProfile_ptrs[cpu_threads];
int cpu_chunks=0;
unsigned int cpu_vect_sequences_db_count;
// common data
unsigned long int i, c;
unsigned int * ptr_chunk_b_disp;
int tid;
unsigned short int * ptr_chunk_n;
char * ptr_chunk_b;
tid = omp_get_thread_num();
if (tid < num_mics){
// allocate buffers for MIC thread
mic_scores = (int*) _mm_malloc(query_sequences_count*(offload_max_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH)*sizeof(int), 64);
// pre-allocate buffers and transfer common thread data to corresponding MIC
#pragma offload_transfer target(mic:tid) in(submat: length(BLOSUM_ELEMS) ALLOC) in(queryProfiles: length(Q*BLOSUM_COLS) ALLOC) \
in(a: length(Q) ALLOC) in(m:length(query_sequences_count) ALLOC) in(a_disp: length(query_sequences_count) ALLOC) \
nocopy(b:length(offload_max_vD) ALLOC) nocopy(n:length(offload_max_vect_sequences_db_count) ALLOC) nocopy(b_disp: length(offload_max_vect_sequences_db_count) ALLOC) \
nocopy(mic_scores: length(query_sequences_count*offload_max_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH) ALLOC) \
nocopy(mic_row_ptrs, mic_maxCol_ptrs, mic_maxRow_ptrs, mic_lastCol_ptrs, mic_scoreProfile_ptrs: ALLOC)
}
// distribute database chunk between MICs and CPU using dynamic scheduling
#pragma omp for schedule(dynamic) nowait
for (c=0; c < chunk_count ; c++) {
ptr_chunk_b = chunk_b[c];
ptr_chunk_n = chunk_n[c];
ptr_chunk_b_disp = chunk_b_disp[c];
scores_offset = chunk_accum_vect_sequences_db_count[c];
if (tid < num_mics){ // MIC thread
offload_vD = chunk_vD[c];
offload_vect_sequences_db_count = chunk_vect_sequences_db_count[c];
// process database chunk in MIC
#pragma offload target(mic:tid) in(ptr_chunk_b[0:offload_vD] : into(b[0:offload_vD]) REUSE) \
in(ptr_chunk_n[0:offload_vect_sequences_db_count] : into(n[0:offload_vect_sequences_db_count]) REUSE) \
in(ptr_chunk_b_disp[0:offload_vect_sequences_db_count] : into(b_disp[0:offload_vect_sequences_db_count]) REUSE) \
out(mic_scores: length(query_sequences_count*offload_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH) REUSE) \
in(a: length(0) REUSE) in(m: length(0) REUSE) in(a_disp: length(0) REUSE) in(submat: length(0) REUSE) in(queryProfiles: length(0) REUSE) \
nocopy(mic_row_ptrs, mic_maxCol_ptrs, mic_maxRow_ptrs, mic_lastCol_ptrs, mic_scoreProfile_ptrs: REUSE)
#pragma omp parallel shared(c, mic_chunks, offload_vect_sequences_db_count, query_sequences_count, open_gap, extend_gap, query_sequences_max_length, sequences_db_max_length, query_length_threshold) num_threads(mic_threads)
{
__m512i *row, *maxCol, *maxRow, *lastCol;
int * ptr_scores;
char * ptr_a, * ptr_b, *ptr_b_block, * scoreProfile, *queryProfile, *ptr_scoreProfile;
__declspec(align(64)) __m512i vzero = _mm512_setzero_epi32(), score, previous, current, aux1, aux2, aux4, auxLastCol;
__declspec(align(64)) __m512i vextend_gap = _mm512_set1_epi32(extend_gap), vopen_extend_gap = _mm512_set1_epi32(open_gap+extend_gap);
__declspec(align(64)) __m512i v16 = _mm512_set1_epi32(16), submat_hi, submat_lo, b_values;
__mmask16 mask;
unsigned int tid, i, j, jj, k, disp_1, disp_2, disp_3, dim1, nbb;
unsigned long int t, s, q;
tid = omp_get_thread_num();
// if this is the first offload, allocate auxiliary buffers
if (mic_chunks == 0) {
mic_row_ptrs[tid] = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64);
mic_maxCol_ptrs[tid] = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64);
mic_maxRow_ptrs[tid] = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64);
mic_lastCol_ptrs[tid] = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64);
if (query_sequences_max_length >= query_length_threshold)
mic_scoreProfile_ptrs[tid] = (char *) _mm_malloc(BLOSUM_ROWS_x_MIC_KNC_INT32_VECTOR_LENGTH*MIC_KNC_BLOCK_SIZE*sizeof(char),16);
}
row = mic_row_ptrs[tid];
maxCol = mic_maxCol_ptrs[tid];
maxRow = mic_maxRow_ptrs[tid];
lastCol = mic_lastCol_ptrs[tid];
scoreProfile = mic_scoreProfile_ptrs[tid];
// calculate chunk alignments using query profile technique
#pragma omp for schedule(dynamic) nowait
for (t=0; t< qp_count*offload_vect_sequences_db_count; t++) {
q = (qp_count-1) - (t % qp_count);
s = (offload_vect_sequences_db_count-1) - (t / qp_count);
queryProfile = queryProfiles + a_disp[q]*BLOSUM_COLS;
ptr_b = b + b_disp[s];
ptr_scores = mic_scores + (q*offload_vect_sequences_db_count+s)*MIC_KNC_INT32_VECTOR_LENGTH;
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32();
// set score to 0
score = _mm512_setzero_epi32();
// calculate number of blocks
nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE);
for (k=0; k < nbb; k++){
// calculate dim1
disp_1 = k*MIC_KNC_BLOCK_SIZE;
dim1 = (MIC_KNC_BLOCK_SIZE < n[s]-disp_1 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_1);
// get b block
ptr_b_block = ptr_b + disp_1*MIC_KNC_INT32_VECTOR_LENGTH;
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<dim1 ; i++ ) row[i] = _mm512_setzero_epi32();
auxLastCol = _mm512_setzero_epi32();
for( i = 0; i < m[q]; i++){
// previous must start in 0
previous = _mm512_setzero_epi32();
// update row[0] with lastCol elements
row[0] = lastCol[i];
// load submat values corresponding to current a residue
disp_1 = i*BLOSUM_COLS;
#if __MIC__
submat_lo = _mm512_extload_epi32(queryProfile+disp_1, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
submat_hi = _mm512_extload_epi32(queryProfile+disp_1+16, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
#endif
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for( jj=1; jj < dim1+1; jj++) {
//calcuate the diagonal value
#if __MIC__
b_values = _mm512_extload_epi32(ptr_b_block+(jj-1)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
#endif
mask = _mm512_cmpge_epi32_mask(b_values,v16);
aux1 = _mm512_permutevar_epi32(b_values, submat_lo);
aux1 = _mm512_mask_permutevar_epi32(aux1, mask, b_values, submat_hi);
current = _mm512_add_epi32(row[jj-1], aux1);
// calculate current max value
current = _mm512_max_epi32(current, maxRow[i]);
current = _mm512_max_epi32(current, maxCol[jj]);
current = _mm512_max_epi32(current, vzero);
// update maxRow and maxCol
maxRow[i] = _mm512_sub_epi32(maxRow[i], vextend_gap);
maxCol[jj] = _mm512_sub_epi32(maxCol[jj], vextend_gap);
aux1 = _mm512_sub_epi32(current, vopen_extend_gap);
maxRow[i] = _mm512_max_epi32(maxRow[i], aux1);
maxCol[jj] = _mm512_max_epi32(maxCol[jj], aux1);
// update row buffer
row[jj-1] = previous;
previous = current;
// update max score
score = _mm512_max_epi32(score,current);
}
// update lastCol
lastCol[i] = auxLastCol;
auxLastCol = current;
}
}
// store max value
_mm512_store_epi32(ptr_scores, score);
}
// calculate chunk alignments using score profile technique
#pragma omp for schedule(dynamic) nowait
for (t=0; t< sp_count*offload_vect_sequences_db_count; t++) {
q = qp_count + (sp_count-1) - (t % sp_count);
s = (offload_vect_sequences_db_count-1) - (t / sp_count);
ptr_a = a + a_disp[q];
ptr_b = b + b_disp[s];
ptr_scores = mic_scores + (q*offload_vect_sequences_db_count+s)*MIC_KNC_INT32_VECTOR_LENGTH;
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32();
// set score to 0
score = _mm512_setzero_epi32();
// calculate number of blocks
nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE);
for (k=0; k < nbb; k++){
// calculate dim1
disp_2 = k*MIC_KNC_BLOCK_SIZE;
dim1 = (MIC_KNC_BLOCK_SIZE < n[s]-disp_2 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_2);
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<dim1 ; i++ ) row[i] = _mm512_setzero_epi32();
auxLastCol = _mm512_setzero_epi32();
// build score profile
disp_1 = dim1*MIC_KNC_INT32_VECTOR_LENGTH;
for (i=0; i< dim1 ;i++ ) {
#if __MIC__
aux1 = _mm512_extload_epi32(ptr_b+(disp_2+i)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
#endif
disp_3 = i*MIC_KNC_INT32_VECTOR_LENGTH;
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (j=0; j< BLOSUM_ROWS-1; j++) {
#if __MIC__
aux2 = _mm512_i32extgather_epi32(aux1, submat + j*BLOSUM_COLS, _MM_UPCONV_EPI32_SINT8 , 1, 0);
_mm512_extstore_epi32(scoreProfile+disp_3+j*disp_1, aux2, _MM_DOWNCONV_EPI32_SINT8 , _MM_HINT_NONE );
#endif
}
#if __MIC__
_mm512_extstore_epi32(scoreProfile+disp_3+(BLOSUM_ROWS-1)*disp_1, vzero, _MM_DOWNCONV_EPI32_SINT8 , _MM_HINT_NONE );
#endif
}
for( i = 0; i < m[q]; i++){
// previous must start in 0
previous = _mm512_setzero_epi32();
// update row[0] with lastCol elements
row[0] = lastCol[i];
// calculate i displacement
ptr_scoreProfile = scoreProfile + ((int)(ptr_a[i]))*disp_1;
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for( jj=1; jj < dim1+1; jj++) {
//calcuate the diagonal value
#if __MIC__
current = _mm512_add_epi32(row[jj-1], _mm512_extload_epi32(ptr_scoreProfile+(jj-1)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0));
#endif
// calculate current max value
current = _mm512_max_epi32(current, maxRow[i]);
current = _mm512_max_epi32(current, maxCol[jj]);
current = _mm512_max_epi32(current, vzero);
// update maxRow and maxCol
maxRow[i] = _mm512_sub_epi32(maxRow[i], vextend_gap);
maxCol[jj] = _mm512_sub_epi32(maxCol[jj], vextend_gap);
aux4 = _mm512_sub_epi32(current, vopen_extend_gap);
maxRow[i] = _mm512_max_epi32(maxRow[i], aux4);
maxCol[jj] = _mm512_max_epi32(maxCol[jj], aux4);
// update row buffer
row[jj-1] = previous;
previous = current;
// update max score
score = _mm512_max_epi32(score,current);
}
// update lastCol
lastCol[i] = auxLastCol;
auxLastCol = current;
}
}
// store max value
_mm512_store_epi32(ptr_scores, score);
}
}
// copy scores from auxiliary buffer to final buffer
for (i=0; i<query_sequences_count ; i++)
memcpy(scores+(i*vect_sequences_db_count+scores_offset)*MIC_KNC_INT32_VECTOR_LENGTH,mic_scores+i*offload_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH,offload_vect_sequences_db_count*MIC_KNC_INT32_VECTOR_LENGTH*sizeof(int));
mic_chunks++;
} else {
cpu_vect_sequences_db_count = chunk_vect_sequences_db_count[c];
// process database chunk in CPU
#pragma omp parallel num_threads(cpu_threads-num_mics)
{
__m128i *row1, *row2, *row3, *maxCol, *maxRow, *lastCol, * ptr_scores, *tmp, *ptr_scoreProfile1, *ptr_scoreProfile2;
char * ptr_a, * ptr_b, * scoreProfile;
__declspec(align(32)) __m128i score, current, auxBlosum[2], auxLastCol, b_values;
__declspec(align(32)) __m128i aux0, aux1, aux2, aux3, aux4, aux5, aux6, aux7;
__declspec(align(32)) __m128i vextend_gap_epi8 = _mm_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm_set1_epi8(open_gap+extend_gap), vzero_epi8 = _mm_set1_epi8(0);
__declspec(align(32)) __m128i vextend_gap_epi16 = _mm_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm_set1_epi16(open_gap+extend_gap), vzero_epi16 = _mm_set1_epi16(0);
__declspec(align(32)) __m128i vextend_gap_epi32 = _mm_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm_set1_epi32(open_gap+extend_gap), vzero_epi32 = _mm_set1_epi32(0);
__declspec(align(32)) __m128i v127 = _mm_set1_epi8(127), v32767 = _mm_set1_epi16(32767);
__declspec(align(32)) __m128i v15 = _mm_set1_epi8(15), v16 = _mm_set1_epi8(16), vneg32 = _mm_set1_epi8(-32);
unsigned int j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, dim1, dim2, nbb;
unsigned long int t, s, q;
int tid, overflow_flag, bb1, bb2, bb1_start, bb2_start, bb1_end, bb2_end;
tid = omp_get_thread_num();
if (cpu_chunks == 0) {
// allocate buffers for CPU thread
cpu_row1_ptrs[tid] = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32);
cpu_row2_ptrs[tid] = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32);
cpu_row3_ptrs[tid] = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32);
cpu_maxCol_ptrs[tid] = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32);
cpu_maxRow_ptrs[tid] = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), 32);
cpu_lastCol_ptrs[tid] = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), 32);
cpu_scoreProfile_ptrs[tid] = (char *) _mm_malloc((BLOSUM_ROWS_x_CPU_SSE_INT8_VECTOR_LENGTH*cpu_block_size)*sizeof(char), 32);
}
row1 = cpu_row1_ptrs[tid];
row2 = cpu_row2_ptrs[tid];
row3 = cpu_row3_ptrs[tid];
maxCol = cpu_maxCol_ptrs[tid];
maxRow = cpu_maxRow_ptrs[tid];
lastCol = cpu_lastCol_ptrs[tid];
scoreProfile = cpu_scoreProfile_ptrs[tid];
// calculate chunk alignments using score profile
#pragma omp for schedule(dynamic) nowait
for (t=0; t< query_sequences_count*cpu_vect_sequences_db_count; t++) {
q = (query_sequences_count-1) - (t % query_sequences_count);
s = (cpu_vect_sequences_db_count-1) - (t / query_sequences_count);
ptr_a = a + a_disp[q];
ptr_b = ptr_chunk_b + ptr_chunk_b_disp[s];
ptr_scores = (__m128i *) (scores + (q*vect_sequences_db_count+scores_offset+s)*CPU_SSE_INT8_VECTOR_LENGTH);
// calculate number of blocks
nbb = ceil( (double) ptr_chunk_n[s] / (double) cpu_block_size);
// init buffers
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi8(0);
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi8(0);
// set score to 0
score = _mm_set1_epi8(0);
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*cpu_block_size;
dim1 = ptr_chunk_n[s]-disp_4;
dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1);
// calculate dim2
dim2 = dim1 / SEQ_LEN_MULT;
// init buffers
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi8(0);
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi8(0);
auxLastCol = _mm_set1_epi8(0);
// calculate a[i] displacement
disp_1 = dim1*CPU_SSE_INT8_VECTOR_LENGTH;
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*CPU_SSE_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm_cmpgt_epi8(b_values,v15);
aux3 = _mm_and_si128(aux2,vneg32);
aux4 = _mm_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m128i *)(scoreProfile) + i;
for (j=0; j< BLOSUM_ROWS-1; j++) {
tmp = (__m128i *) (submat + j*BLOSUM_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
aux7 = _mm_add_epi8(aux5, aux6);
_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
}
_mm_store_si128(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8);
}
for( i = 0; i < m[q]; i+=2){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
row2[0] = lastCol[i+1];
// calculate i displacement
ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1);
ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1);
// store maxRow in auxiliars
aux1 = maxRow[i];
aux2 = maxRow[i+1];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(SEQ_LEN_MULT)
for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) {
//calcuate the diagonal value
current = _mm_adds_epi8(row1[j-1], _mm_load_si128(ptr_scoreProfile1+(j-1)));
// calculate current max value
current = _mm_max_epi8(current, aux1);
current = _mm_max_epi8(current, maxCol[j]);
current = _mm_max_epi8(current, vzero_epi8);
// update maxRow and maxCol
aux1 = _mm_subs_epi8(aux1, vextend_gap_epi8);
maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm_subs_epi8(current, vopen_extend_gap_epi8);
aux1 = _mm_max_epi8(aux1, aux0);
maxCol[j] = _mm_max_epi8(maxCol[j], aux0);
// update row buffer
row2[j] = current;
// update max score
score = _mm_max_epi8(score,current);
//calcuate the diagonal value
current = _mm_adds_epi8(row2[j-1], _mm_load_si128(ptr_scoreProfile2+(j-1)));
// calculate current max value
current = _mm_max_epi8(current, aux2);
current = _mm_max_epi8(current, maxCol[j]);
current = _mm_max_epi8(current, vzero_epi8);
// update maxRow and maxCol
aux2 = _mm_subs_epi8(aux2, vextend_gap_epi8);
maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm_subs_epi8(current, vopen_extend_gap_epi8);
aux2 = _mm_max_epi8(aux2, aux0);
maxCol[j] = _mm_max_epi8(maxCol[j], aux0);
// update row buffer
row3[j] = current;
// update max score
score = _mm_max_epi8(score,current);
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = row2[dim1];
auxLastCol = current;
// swap buffers
tmp = row1;
row1 = row3;
row3 = tmp;
}
}
// store max value
_mm_store_si128 (ptr_scores,_mm_cvtepi8_epi32(score));
_mm_store_si128 (ptr_scores+1,_mm_cvtepi8_epi32(_mm_srli_si128(score,4)));
_mm_store_si128 (ptr_scores+2,_mm_cvtepi8_epi32(_mm_srli_si128(score,8)));
_mm_store_si128 (ptr_scores+3,_mm_cvtepi8_epi32(_mm_srli_si128(score,12)));
// overflow detection
aux1 = _mm_cmpeq_epi8(score,v127);
overflow_flag = _mm_test_all_zeros(aux1,v127);
// if overflow
if (overflow_flag == 0){
// check overflow in lower 8-bits
aux1 = _mm_cmpeq_epi8(_mm_slli_si128(score,8),v127);
bb1_start = _mm_test_all_zeros(aux1,v127);
// check overflow in upper 8-bits
aux1 = _mm_cmpeq_epi8(_mm_srli_si128(score,8),v127);
bb1_end = 2 - _mm_test_all_zeros(aux1,v127);
// recalculate using 16-bit signed integer precision
for (bb1=bb1_start; bb1<bb1_end ; bb1++){
// init buffers
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi16(0);
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi16(0);
// set score to 0
score = _mm_set1_epi16(0);
disp_2 = bb1*CPU_SSE_INT16_VECTOR_LENGTH;
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*cpu_block_size;
dim1 = ptr_chunk_n[s]-disp_4;
dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1);
// calculate dim2
dim2 = dim1 / SEQ_LEN_MULT;
// init buffers
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi16(0);
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi16(0);
auxLastCol = _mm_set1_epi16(0);
// calculate a[i] displacement
disp_1 = dim1*CPU_SSE_INT8_VECTOR_LENGTH;
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*CPU_SSE_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm_cmpgt_epi8(b_values,v15);
aux3 = _mm_and_si128(aux2,vneg32);
aux4 = _mm_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m128i *)(scoreProfile) + i;
for (j=0; j< BLOSUM_ROWS-1; j++) {
tmp = (__m128i *) (submat + j*BLOSUM_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
aux7 = _mm_add_epi8(aux5, aux6);
_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
}
_mm_store_si128(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8);
}
for( i = 0; i < m[q]; i+=2){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
row2[0] = lastCol[i+1];
// calculate i displacement
ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_2);
ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_2);
// store maxRow in auxiliars
aux1 = maxRow[i];
aux2 = maxRow[i+1];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(SEQ_LEN_MULT)
for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) {
//calcuate the diagonal value
current = _mm_adds_epi16(row1[j-1], _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile1+(j-1))));
// calculate current max value
current = _mm_max_epi16(current, aux1);
current = _mm_max_epi16(current, maxCol[j]);
current = _mm_max_epi16(current, vzero_epi16);
// update maxRow and maxCol
aux1 = _mm_subs_epi16(aux1, vextend_gap_epi16);
maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm_subs_epi16(current, vopen_extend_gap_epi16);
aux1 = _mm_max_epi16(aux1, aux0);
maxCol[j] = _mm_max_epi16(maxCol[j], aux0);
// update row buffer
row2[j] = current;
// update max score
score = _mm_max_epi16(score,current);
//calcuate the diagonal value
current = _mm_adds_epi16(row2[j-1], _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile2+(j-1))));
// calculate current max value
current = _mm_max_epi16(current, aux2);
current = _mm_max_epi16(current, maxCol[j]);
current = _mm_max_epi16(current, vzero_epi16);
// update maxRow and maxCol
aux2 = _mm_subs_epi16(aux2, vextend_gap_epi16);
maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm_subs_epi16(current, vopen_extend_gap_epi16);
aux2 = _mm_max_epi16(aux2, aux0);
maxCol[j] = _mm_max_epi16(maxCol[j], aux0);
// update row buffer
row3[j] = current;
// update max score
score = _mm_max_epi16(score,current);
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = row2[dim1];
auxLastCol = current;
// swap buffers
tmp = row1;
row1 = row3;
row3 = tmp;
}
}
// store max value
_mm_store_si128 (ptr_scores+bb1*2,_mm_cvtepi16_epi32(score));
_mm_store_si128 (ptr_scores+bb1*2+1,_mm_cvtepi16_epi32(_mm_srli_si128(score,8)));
// overflow detection
aux1 = _mm_cmpeq_epi16(score,v32767);
overflow_flag = _mm_test_all_zeros(aux1,v32767);
// if overflow
if (overflow_flag == 0){
// overflow detection in lower 16-bits
aux1 = _mm_cmpeq_epi16(_mm_slli_si128(score,8),v32767);
bb2_start = _mm_test_all_zeros(aux1,v32767);
// overflow detection in upper 16-bits
aux1 = _mm_cmpeq_epi16(_mm_srli_si128(score,8),v32767);
bb2_end = 2 - _mm_test_all_zeros(aux1,v32767);
// recalculate using 32-bit signed integer precision
for (bb2=bb2_start; bb2<bb2_end ; bb2++){
// init buffers
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi32(0);
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi32(0);
// set score to 0
score = _mm_set1_epi32(0);
disp_3 = disp_2 + bb2*CPU_SSE_INT32_VECTOR_LENGTH;
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*cpu_block_size;
dim1 = ptr_chunk_n[s]-disp_4;
dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1);
// calculate dim2
dim2 = dim1 / SEQ_LEN_MULT;
// init buffers
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi32(0);
#pragma unroll(CPU_SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi32(0);
auxLastCol = _mm_set1_epi32(0);
// calculate a[i] displacement
disp_1 = dim1*CPU_SSE_INT8_VECTOR_LENGTH;
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*CPU_SSE_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm_cmpgt_epi8(b_values,v15);
aux3 = _mm_and_si128(aux2,vneg32);
aux4 = _mm_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m128i *)(scoreProfile) + i;
for (j=0; j< BLOSUM_ROWS-1; j++) {
tmp = (__m128i *) (submat + j*BLOSUM_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
aux7 = _mm_add_epi8(aux5, aux6);
_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
}
_mm_store_si128(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8);
}
for( i = 0; i < m[q]; i+=2){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
row2[0] = lastCol[i+1];
// calculate i displacement
ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_3);
ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_3);
// store maxRow in auxiliars
aux1 = maxRow[i];
aux2 = maxRow[i+1];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(SEQ_LEN_MULT)
for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) {
//calcuate the diagonal value
current = _mm_add_epi32(row1[j-1], _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile1+(j-1))));
// calculate current max value
current = _mm_max_epi32(current, aux1);
current = _mm_max_epi32(current, maxCol[j]);
current = _mm_max_epi32(current, vzero_epi32);
// update maxRow and maxCol
aux1 = _mm_sub_epi32(aux1, vextend_gap_epi32);
maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm_sub_epi32(current, vopen_extend_gap_epi32);
aux1 = _mm_max_epi32(aux1, aux0);
maxCol[j] = _mm_max_epi32(maxCol[j], aux0);
// update row buffer
row2[j] = current;
// update max score
score = _mm_max_epi32(score,current);
//calcuate the diagonal value
current = _mm_add_epi32(row2[j-1], _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile2+(j-1))));
// calculate current max value
current = _mm_max_epi32(current, aux2);
current = _mm_max_epi32(current, maxCol[j]);
current = _mm_max_epi32(current, vzero_epi32);
// update maxRow and maxCol
aux2 = _mm_sub_epi32(aux2, vextend_gap_epi32);
maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm_sub_epi32(current, vopen_extend_gap_epi32);
aux2 = _mm_max_epi32(aux2, aux0);
maxCol[j] = _mm_max_epi32(maxCol[j], aux0);
// update row buffer
row3[j] = current;
// update max score
score = _mm_max_epi32(score,current);
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = row2[dim1];
auxLastCol = current;
// swap buffers
tmp = row1;
row1 = row3;
row3 = tmp;
}
}
// store max value
_mm_store_si128 (ptr_scores+bb1*2+bb2,score);
}
}
}
}
}
}
cpu_chunks++;
}
}
if (tid < num_mics){
// de-allocate buffers in corresponding MIC
#pragma offload_transfer target(mic:tid) nocopy(submat: length(0) FREE) nocopy(queryProfiles: length(0) FREE) \
nocopy(a: length(0) FREE) nocopy(m:length(0) FREE) nocopy(a_disp: length(0) FREE) \
nocopy(b:length(0) FREE) nocopy(n:length(0) FREE) nocopy(b_disp: length(0) FREE) \
nocopy(mic_scores:length(0) FREE) \
nocopy(mic_row_ptrs, mic_maxCol_ptrs, mic_maxRow_ptrs, mic_lastCol_ptrs, mic_scoreProfile_ptrs: FREE)
_mm_free(mic_scores);
} else {
// de-allocate CPU buffers
if (cpu_chunks > 0){
for (i=0; i<cpu_threads-num_mics ; i++){
_mm_free(cpu_row1_ptrs[i]);
_mm_free(cpu_row2_ptrs[i]);
_mm_free(cpu_row3_ptrs[i]);
_mm_free(cpu_maxCol_ptrs[i]);
_mm_free(cpu_maxRow_ptrs[i]);
_mm_free(cpu_lastCol_ptrs[i]);
_mm_free(cpu_scoreProfile_ptrs[i]);
}
}
}
}
*workTime = dwalltime()-tick;
_mm_free(queryProfiles);
}
// Heterogeneous search using: (1) AVX2 instructions with Score Profile on the CPU, and (2) KNC instructions with Adaptive Profile on the MIC
void het_search_avx2_sp_knc_ap (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned int query_sequences_count, unsigned long int Q,
unsigned int * query_disp, unsigned long int vect_sequences_db_count, char ** chunk_b, unsigned int chunk_count, unsigned int * chunk_vect_sequences_db_count,
unsigned short int ** chunk_n, unsigned int ** chunk_b_disp, unsigned long int * chunk_vD, char * submat, int open_gap, int extend_gap,
int cpu_threads, int cpu_block_size, int num_mics, int mic_threads, int * scores, double * workTime, unsigned short int query_length_threshold) {
unsigned long int offload_max_vD=0, * chunk_accum_vect_sequences_db_count;
long int i=0;
double tick;
unsigned short int * m, *n, sequences_db_max_length, query_sequences_max_length;
unsigned int * a_disp, * b_disp = NULL, offload_max_vect_sequences_db_count=0, qp_count, sp_count;
unsigned int offload_vect_sequences_db_count;
char *a, * b, *queryProfiles;
a = query_sequences; b = chunk_b[0];
m = query_sequences_lengths; n = chunk_n[0];
a_disp = query_disp; b_disp = chunk_b_disp[0];
query_sequences_max_length = query_sequences_lengths[query_sequences_count-1];
sequences_db_max_length = chunk_n[chunk_count-1][chunk_vect_sequences_db_count[chunk_count-1]-1];
// calculate maximum chunk size
for (i=0; i<chunk_count ; i++)
offload_max_vD = (offload_max_vD > chunk_vD[i] ? offload_max_vD : chunk_vD[i]);
// calculate maximum chunk sequences count
for (i=0; i<chunk_count ; i++)
offload_max_vect_sequences_db_count = (offload_max_vect_sequences_db_count > chunk_vect_sequences_db_count[i] ? offload_max_vect_sequences_db_count : chunk_vect_sequences_db_count[i]);
// calculate number of query sequences that are processed with query and score profile
i = 0;
while ((i < query_sequences_count) && (query_sequences_lengths[i] < query_length_threshold))
i++;
qp_count = i;
sp_count = query_sequences_count-qp_count;
// build query profile's
queryProfiles = (char *)_mm_malloc(Q*BLOSUM_COLS*sizeof(char), 64);
for (i=0; i<Q ; i++)
memcpy(queryProfiles+i*BLOSUM_COLS,submat+a[i]*BLOSUM_COLS,BLOSUM_COLS*sizeof(char));
// Allocate memory for CPU buffers
chunk_accum_vect_sequences_db_count = (unsigned long int *)_mm_malloc(chunk_count*sizeof(unsigned long int), 32);
chunk_accum_vect_sequences_db_count[0] = 0;
for (i=1; i<chunk_count ; i++)
chunk_accum_vect_sequences_db_count[i] = chunk_accum_vect_sequences_db_count[i-1] + chunk_vect_sequences_db_count[i-1];
// allow nested parallelism
omp_set_nested(1);
tick = dwalltime();
#pragma omp parallel default(none) shared(queryProfiles,submat, a,m,a_disp,query_sequences_count,b,n,b_disp,vect_sequences_db_count,scores, cpu_block_size, num_mics, mic_threads, open_gap, extend_gap, cpu_threads, qp_count,sp_count, chunk_b, chunk_n, chunk_b_disp, chunk_vD, query_sequences_max_length, sequences_db_max_length, Q, chunk_vect_sequences_db_count, chunk_accum_vect_sequences_db_count, chunk_count, offload_max_vD, offload_max_vect_sequences_db_count, query_length_threshold) num_threads(num_mics+1)
{
// data for MIC thread
__declspec(align(64)) __m512i *mic_row_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *mic_maxCol_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *mic_maxRow_ptrs[MIC_MAX_NUM_THREADS]={NULL}, *mic_lastCol_ptrs[MIC_MAX_NUM_THREADS]={NULL};
__declspec(align(64)) char * mic_scoreProfile_ptrs[MIC_MAX_NUM_THREADS]={NULL};
unsigned long int offload_vD, scores_offset;
unsigned int mic_chunks=0, offload_vect_sequences_db_count;
int * mic_scores;
// data for CPU thread
__m256i *cpu_row1_ptrs[cpu_threads], *cpu_row2_ptrs[cpu_threads], *cpu_row3_ptrs[cpu_threads];
__m256i *cpu_maxCol_ptrs[cpu_threads], *cpu_maxRow_ptrs[cpu_threads], *cpu_lastCol_ptrs[cpu_threads];
char * cpu_scoreProfile_ptrs[cpu_threads];
int cpu_chunks=0;
unsigned int cpu_vect_sequences_db_count;
// common data
unsigned long int i, c;
unsigned int * ptr_chunk_b_disp;
int tid;
unsigned short int * ptr_chunk_n;
char * ptr_chunk_b;
tid = omp_get_thread_num();
if (tid < num_mics){
// allocate buffers for MIC thread
mic_scores = (int*) _mm_malloc(query_sequences_count*(offload_max_vect_sequences_db_count*CPU_AVX2_INT8_VECTOR_LENGTH)*sizeof(int), 64);
// pre-allocate buffers and transfer common thread data to corresponding MIC
#pragma offload_transfer target(mic:tid) in(submat: length(BLOSUM_ELEMS) ALLOC) in(queryProfiles: length(Q*BLOSUM_COLS) ALLOC) \
in(a: length(Q) ALLOC) in(m:length(query_sequences_count) ALLOC) in(a_disp: length(query_sequences_count) ALLOC) \
nocopy(b:length(offload_max_vD) ALLOC) nocopy(n:length(offload_max_vect_sequences_db_count) ALLOC) nocopy(b_disp: length(offload_max_vect_sequences_db_count) ALLOC) \
nocopy(mic_scores: length(query_sequences_count*offload_max_vect_sequences_db_count*CPU_AVX2_INT8_VECTOR_LENGTH) ALLOC) \
nocopy(mic_row_ptrs, mic_maxCol_ptrs, mic_maxRow_ptrs, mic_lastCol_ptrs, mic_scoreProfile_ptrs: ALLOC)
}
// distribute database chunk between MICs and CPU using dynamic scheduling
#pragma omp for schedule(dynamic) nowait
for (c=0; c < chunk_count ; c++) {
ptr_chunk_b = chunk_b[c];
ptr_chunk_n = chunk_n[c];
ptr_chunk_b_disp = chunk_b_disp[c];
scores_offset = chunk_accum_vect_sequences_db_count[c];
if (tid < num_mics){ // MIC thread
offload_vD = chunk_vD[c];
offload_vect_sequences_db_count = chunk_vect_sequences_db_count[c];
// process database chunk in MIC
#pragma offload target(mic:tid) in(ptr_chunk_b[0:offload_vD] : into(b[0:offload_vD]) REUSE) \
in(ptr_chunk_n[0:offload_vect_sequences_db_count] : into(n[0:offload_vect_sequences_db_count]) REUSE) \
in(ptr_chunk_b_disp[0:offload_vect_sequences_db_count] : into(b_disp[0:offload_vect_sequences_db_count]) REUSE) \
out(mic_scores: length(query_sequences_count*offload_vect_sequences_db_count*CPU_AVX2_INT8_VECTOR_LENGTH) REUSE) \
in(a: length(0) REUSE) in(m: length(0) REUSE) in(a_disp: length(0) REUSE) in(submat: length(0) REUSE) in(queryProfiles: length(0) REUSE) \
nocopy(mic_row_ptrs, mic_maxCol_ptrs, mic_maxRow_ptrs, mic_lastCol_ptrs, mic_scoreProfile_ptrs: REUSE)
#pragma omp parallel shared(c, mic_chunks, offload_vect_sequences_db_count, query_sequences_count, open_gap, extend_gap, query_sequences_max_length, sequences_db_max_length, query_length_threshold) num_threads(mic_threads)
{
__m512i *row, *maxCol, *maxRow, *lastCol;
int * ptr_scores;
char * ptr_a, * ptr_b, *ptr_b_block, *ptr_scoreProfile, * scoreProfile, *queryProfile;
__declspec(align(64)) __m512i vzero = _mm512_setzero_epi32(), score, previous, current, aux1, auxLastCol;
__declspec(align(64)) __m512i vextend_gap = _mm512_set1_epi32(extend_gap), vopen_extend_gap = _mm512_set1_epi32(open_gap+extend_gap);
__declspec(align(64)) __m512i v16 = _mm512_set1_epi32(16), submat_hi, submat_lo, b_values;
__mmask16 mask;
unsigned int tid, i, j, jj, k, disp_1, disp_2, disp_3, dim1, nbb;
unsigned long int t, tt, s, q;
tid = omp_get_thread_num();
// if this is the first offload, allocate auxiliary buffers
if (mic_chunks == 0) {
mic_row_ptrs[tid] = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64);
mic_maxCol_ptrs[tid] = (__m512i *) _mm_malloc((MIC_KNC_BLOCK_SIZE+1)*sizeof(__m512i), 64);
mic_maxRow_ptrs[tid] = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64);
mic_lastCol_ptrs[tid] = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), 64);
if (query_sequences_max_length >= query_length_threshold)
mic_scoreProfile_ptrs[tid] = (char *) _mm_malloc(BLOSUM_ROWS_x_MIC_KNC_INT32_VECTOR_LENGTH*MIC_KNC_BLOCK_SIZE*sizeof(char),16);
}
row = mic_row_ptrs[tid];
maxCol = mic_maxCol_ptrs[tid];
maxRow = mic_maxRow_ptrs[tid];
lastCol = mic_lastCol_ptrs[tid];
scoreProfile = mic_scoreProfile_ptrs[tid];
// calculate chunk alignments using query profile technique
#pragma omp for schedule(dynamic) nowait
for (t=0; t< qp_count*offload_vect_sequences_db_count; t++) {
q = (qp_count-1) - (t % qp_count);
s = (offload_vect_sequences_db_count-1) - (t / qp_count);
queryProfile = queryProfiles + a_disp[q]*BLOSUM_COLS;
for (tt = 0; tt < MIC_KNC_INT32_TO_CPU_AVX2_INT8_ADAPT_FACTOR; tt++) {
ptr_b = b + b_disp[s] + tt* MIC_KNC_INT32_VECTOR_LENGTH;
ptr_scores = mic_scores + (q*offload_vect_sequences_db_count+s)*CPU_AVX2_INT8_VECTOR_LENGTH + tt* MIC_KNC_INT32_VECTOR_LENGTH;
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32();
// set score to 0
score = _mm512_setzero_epi32();
// calculate number of blocks
nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE);
for (k=0; k < nbb; k++){
// calculate dim1
disp_1 = k*MIC_KNC_BLOCK_SIZE;
dim1 = (MIC_KNC_BLOCK_SIZE < n[s]-disp_1 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_1);
// get b block
ptr_b_block = ptr_b + disp_1*CPU_AVX2_INT8_VECTOR_LENGTH;
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<dim1 ; i++ ) row[i] = _mm512_setzero_epi32();
auxLastCol = _mm512_setzero_epi32();
for( i =0; i < m[q]; i++){
// previous must start in 0
previous = _mm512_setzero_epi32();
// update row[0] with lastCol elements
row[0] = lastCol[i];
// load submat values corresponding to current a residue
disp_1 = i*BLOSUM_COLS;
#if __MIC__
submat_lo = _mm512_extload_epi32(queryProfile+disp_1, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
submat_hi = _mm512_extload_epi32(queryProfile+disp_1+16, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
#endif
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for( j = 1; j < dim1+1; j++) {
//calcuate the diagonal value
#if __MIC__
b_values = _mm512_extload_epi32(ptr_b_block+(j-1)*CPU_AVX2_INT8_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
#endif
mask = _mm512_cmpge_epi32_mask(b_values,v16);
aux1 = _mm512_permutevar_epi32(b_values, submat_lo);
aux1 = _mm512_mask_permutevar_epi32(aux1, mask, b_values, submat_hi);
current = _mm512_add_epi32(row[j-1], aux1);
// calculate current max value
current = _mm512_max_epi32(current, maxRow[i]);
current = _mm512_max_epi32(current, maxCol[j]);
current = _mm512_max_epi32(current, vzero);
// update maxRow and maxCol
maxRow[i] = _mm512_sub_epi32(maxRow[i], vextend_gap);
maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap);
aux1 = _mm512_sub_epi32(current, vopen_extend_gap);
maxRow[i] = _mm512_max_epi32(maxRow[i], aux1);
maxCol[j] = _mm512_max_epi32(maxCol[j], aux1);
// update row buffer
row[j-1] = previous;
previous = current;
// update max score
score = _mm512_max_epi32(score,current);
}
// update lastCol
lastCol[i] = auxLastCol;
auxLastCol = current;
}
}
// store max value
_mm512_store_epi32(ptr_scores, score);
}
}
// calculate chunk alignments using score profile technique
#pragma omp for schedule(dynamic) nowait
for (t=0; t< sp_count*offload_vect_sequences_db_count; t++) {
q = qp_count + (sp_count-1) - (t % sp_count);
s = (offload_vect_sequences_db_count-1) - (t / sp_count);
ptr_a = a + a_disp[q];
for (tt=0; tt<MIC_KNC_INT32_TO_CPU_AVX2_INT8_ADAPT_FACTOR; tt++){
ptr_b = b + b_disp[s] + tt* MIC_KNC_INT32_VECTOR_LENGTH;
ptr_scores = mic_scores + (q*offload_vect_sequences_db_count+s)*CPU_AVX2_INT8_VECTOR_LENGTH + tt* MIC_KNC_INT32_VECTOR_LENGTH;
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32();
// set score to 0
score = _mm512_setzero_epi32();
// calculate number of blocks
nbb = ceil( (double) n[s] / (double) MIC_KNC_BLOCK_SIZE);
for (k=0; k < nbb; k++){
// calculate dim1
disp_2 = k*MIC_KNC_BLOCK_SIZE;
dim1 = (MIC_KNC_BLOCK_SIZE < n[s]-disp_2 ? MIC_KNC_BLOCK_SIZE : n[s]-disp_2);
// init buffers
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (i=0; i<dim1 ; i++ ) row[i] = _mm512_setzero_epi32();
auxLastCol = _mm512_setzero_epi32();
// build score profile
disp_1 = dim1*MIC_KNC_INT32_VECTOR_LENGTH;
for (i=0; i<dim1 ;i++ ) {
#if __MIC__
aux1 = _mm512_extload_epi32(ptr_b+(disp_2+i)*CPU_AVX2_INT8_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0);
#endif
disp_3 = i*MIC_KNC_INT32_VECTOR_LENGTH;
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for (j=0; j< BLOSUM_ROWS-1; j++) {
#if __MIC__
aux2 = _mm512_i32extgather_epi32(aux1, submat + j*BLOSUM_COLS, _MM_UPCONV_EPI32_SINT8 , 1, 0);
_mm512_extstore_epi32(scoreProfile+disp_3+j*disp_1, aux2, _MM_DOWNCONV_EPI32_SINT8 , _MM_HINT_NONE );
#endif
}
#if __MIC__
_mm512_extstore_epi32(scoreProfile+disp_3+(BLOSUM_ROWS-1)*disp_1, vzero, _MM_DOWNCONV_EPI32_SINT8 , _MM_HINT_NONE );
#endif
}
for( i = 0; i < m[q]; i++){
// previous must start in 0
previous = _mm512_setzero_epi32();
// update row[0] with lastCol elements
row[0] = lastCol[i];
// calculate i displacement
ptr_scoreProfile = scoreProfile + ((int)(ptr_a[i]))*disp_1;
#pragma unroll(MIC_KNC_UNROLL_COUNT)
for( j=(k*MIC_KNC_BLOCK_SIZE)+1, jj=1; jj < dim1+1; j++, jj++) {
//calcuate the diagonal value
#if __MIC__
current = _mm512_add_epi32(row[jj-1], _mm512_extload_epi32(ptr_scoreProfile+(jj-1)*MIC_KNC_INT32_VECTOR_LENGTH, _MM_UPCONV_EPI32_SINT8, _MM_BROADCAST32_NONE, 0));
#endif
// calculate current max value
current = _mm512_max_epi32(current, maxRow[i]);
current = _mm512_max_epi32(current, maxCol[jj]);
current = _mm512_max_epi32(current, vzero);
// update maxRow and maxCol
maxRow[i] = _mm512_sub_epi32(maxRow[i], vextend_gap);
maxCol[jj] = _mm512_sub_epi32(maxCol[jj], vextend_gap);
aux1 = _mm512_sub_epi32(current, vopen_extend_gap);
maxRow[i] = _mm512_max_epi32(maxRow[i], aux1);
maxCol[jj] = _mm512_max_epi32(maxCol[jj], aux1);
// update row buffer
row[jj-1] = previous;
previous = current;
// update max score
score = _mm512_max_epi32(score,current);
}
// update lastCol
lastCol[i] = auxLastCol;
auxLastCol = current;
}
}
// store max value
_mm512_store_epi32(ptr_scores, score);
}
}
}
// copy scores from auxiliary buffer to final buffer
for (i=0; i<query_sequences_count ; i++)
memcpy(scores+(i*vect_sequences_db_count+scores_offset)*CPU_AVX2_INT8_VECTOR_LENGTH,mic_scores+i*offload_vect_sequences_db_count*CPU_AVX2_INT8_VECTOR_LENGTH,offload_vect_sequences_db_count*CPU_AVX2_INT8_VECTOR_LENGTH*sizeof(int));
mic_chunks++;
} else {
cpu_vect_sequences_db_count = chunk_vect_sequences_db_count[c];
// process database chunk in CPU
#pragma omp parallel num_threads(cpu_threads-num_mics)
{
__m256i *row1, *row2, *row3, *maxCol, *maxRow, *lastCol, * ptr_scores, * tmp_row, *ptr_scoreProfile1, *ptr_scoreProfile2;
__m128i *tmp;
char * ptr_a, * ptr_b, * scoreProfile;
__declspec(align(32)) __m256i score, current, auxLastCol, b_values, blosum_lo, blosum_hi;
__declspec(align(32)) __m256i aux0, aux1, aux2, aux3, aux4, aux5, aux6;
__declspec(align(32)) __m256i vextend_gap_epi8 = _mm256_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm256_set1_epi8(open_gap+extend_gap);
__declspec(align(32)) __m256i vextend_gap_epi16 = _mm256_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm256_set1_epi16(open_gap+extend_gap);
__declspec(align(32)) __m256i vextend_gap_epi32 = _mm256_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm256_set1_epi32(open_gap+extend_gap);
__declspec(align(32)) __m256i vzero_epi8 = _mm256_set1_epi8(0), vzero_epi16 = _mm256_set1_epi16(0), vzero_epi32 = _mm256_set1_epi32(0);
__declspec(align(32)) __m256i v15 = _mm256_set1_epi8(15), vneg32 = _mm256_set1_epi8(-32), v16 = _mm256_set1_epi8(16);
__declspec(align(32)) __m256i v127 = _mm256_set1_epi8(127), v32767 = _mm256_set1_epi16(32767);
__declspec(align(32)) __m128i aux, auxBlosum[2];
unsigned int i, j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, dim1, dim2, nbb;
unsigned long int t, s, q;
int tid, overflow_flag, bb1, bb2, bb1_start, bb1_end, bb2_start, bb2_end;
tid = omp_get_thread_num();
// allocate memory for auxiliary buffers
if (cpu_chunks == 0){
cpu_row1_ptrs[tid] = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32);
cpu_row2_ptrs[tid] = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32);
cpu_row3_ptrs[tid] = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32);
cpu_maxCol_ptrs[tid] = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32);
cpu_maxRow_ptrs[tid] = (__m256i *) _mm_malloc((query_sequences_max_length)*sizeof(__m256i), 32);
cpu_lastCol_ptrs[tid] = (__m256i *) _mm_malloc((query_sequences_max_length)*sizeof(__m256i), 32);
cpu_scoreProfile_ptrs[tid] = (char *) _mm_malloc((BLOSUM_ROWS_x_CPU_AVX2_INT8_VECTOR_LENGTH*cpu_block_size)*sizeof(char), 32);
}
row1 = cpu_row1_ptrs[tid];
row2 = cpu_row2_ptrs[tid];
row3 = cpu_row3_ptrs[tid];
maxCol = cpu_maxCol_ptrs[tid];
maxRow = cpu_maxRow_ptrs[tid];
lastCol = cpu_lastCol_ptrs[tid];
scoreProfile = cpu_scoreProfile_ptrs[tid];
// calculate alignment score
#pragma omp for schedule(dynamic) nowait
for (t=0; t< query_sequences_count*cpu_vect_sequences_db_count; t++) {
q = (query_sequences_count-1) - (t % query_sequences_count);
s = (cpu_vect_sequences_db_count-1) - (t / query_sequences_count);
ptr_a = a + a_disp[q];
ptr_b = ptr_chunk_b + ptr_chunk_b_disp[s];
ptr_scores = (__m256i *) (scores + (q*vect_sequences_db_count+scores_offset+s)*CPU_AVX2_INT8_VECTOR_LENGTH);
// caluclate number of blocks
nbb = ceil( (double) ptr_chunk_n[s] / (double) cpu_block_size);
// init buffers
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm256_set1_epi8(0);
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm256_set1_epi8(0);
// set score to 0
score = _mm256_set1_epi8(0);
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*cpu_block_size;
dim1 = ptr_chunk_n[s]-disp_4;
dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1);
// calculate dim2
dim2 = dim1 / SEQ_LEN_MULT;
// init buffers
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm256_set1_epi8(0);
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm256_set1_epi8(0);
auxLastCol = _mm256_set1_epi8(0);
// calculate a[i] displacement
disp_1 = dim1*CPU_AVX2_INT8_VECTOR_LENGTH;
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm256_loadu_si256((__m256i *) (ptr_b + (disp_4+i)*CPU_AVX2_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm256_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm256_cmpgt_epi8(b_values,v15);
aux3 = _mm256_and_si256(aux2,vneg32);
aux4 = _mm256_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m256i *)(scoreProfile) + i;
for (j=0; j< BLOSUM_ROWS-1; j++) {
tmp = (__m128i*) (submat + j*BLOSUM_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
blosum_lo = _mm256_loadu2_m128i(&auxBlosum[0], &auxBlosum[0]);
blosum_hi = _mm256_loadu2_m128i(&auxBlosum[1], &auxBlosum[1]);
aux5 = _mm256_shuffle_epi8(blosum_lo,aux4);
aux6 = _mm256_shuffle_epi8(blosum_hi,aux1);
_mm256_store_si256(ptr_scoreProfile1+j*dim1,_mm256_or_si256(aux5,aux6));
}
_mm256_store_si256(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1,vzero_epi8);
}
for( i = 0; i < m[q]; i+=2){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
row2[0] = lastCol[i+1];
// calculate score profile displacement
ptr_scoreProfile1 = (__m256i *)(scoreProfile+((unsigned int)(ptr_a[i]))*disp_1);
ptr_scoreProfile2 = (__m256i *)(scoreProfile+((unsigned int)(ptr_a[i+1]))*disp_1);
// store maxRow in auxiliars
aux1 = maxRow[i];
aux2 = maxRow[i+1];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(SEQ_LEN_MULT)
for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) {
//calcuate the diagonal value
current = _mm256_adds_epi8(row1[j-1], _mm256_load_si256(ptr_scoreProfile1+(j-1)));
// calculate current max value
current = _mm256_max_epi8(current, aux1);
current = _mm256_max_epi8(current, maxCol[j]);
current = _mm256_max_epi8(current, vzero_epi8);
// update maxRow and maxCol
aux1 = _mm256_subs_epi8(aux1, vextend_gap_epi8);
maxCol[j] = _mm256_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm256_subs_epi8(current, vopen_extend_gap_epi8);
aux1 = _mm256_max_epi8(aux1, aux0);
maxCol[j] = _mm256_max_epi8(maxCol[j], aux0);
// update row buffer
row2[j] = current;
// update max score
score = _mm256_max_epi8(score,current);
//calcuate the diagonal value
current = _mm256_adds_epi8(row2[j-1], _mm256_load_si256(ptr_scoreProfile2+(j-1)));
// calculate current max value
current = _mm256_max_epi8(current, aux2);
current = _mm256_max_epi8(current, maxCol[j]);
current = _mm256_max_epi8(current, vzero_epi8);
// update maxRow and maxCol
aux2 = _mm256_subs_epi8(aux2, vextend_gap_epi8);
maxCol[j] = _mm256_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm256_subs_epi8(current, vopen_extend_gap_epi8);
aux2 = _mm256_max_epi8(aux2, aux0);
maxCol[j] = _mm256_max_epi8(maxCol[j], aux0);
// update row buffer
row3[j] = current;
// update max score
score = _mm256_max_epi8(score,current);
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = row2[dim1];
auxLastCol = current;
// swap buffers
tmp_row = row1;
row1 = row3;
row3 = tmp_row;
}
}
// store max value
aux = _mm256_extracti128_si256 (score,0);
_mm256_store_si256 (ptr_scores,_mm256_cvtepi8_epi32(aux));
_mm256_store_si256 (ptr_scores+1,_mm256_cvtepi8_epi32(_mm_srli_si128(aux,8)));
aux = _mm256_extracti128_si256 (score,1);
_mm256_store_si256 (ptr_scores+2,_mm256_cvtepi8_epi32(aux));
_mm256_store_si256 (ptr_scores+3,_mm256_cvtepi8_epi32(_mm_srli_si128(aux,8)));
// overflow detection
aux1 = _mm256_cmpeq_epi8(score,v127);
overflow_flag = _mm256_testz_si256(aux1,v127);
// if overflow
if (overflow_flag == 0){
// check overflow in low 16 bits
aux1 = _mm256_cmpeq_epi8(_mm256_inserti128_si256(vzero_epi8,_mm256_extracti128_si256(score,0),0),v127);
bb1_start = _mm256_testz_si256(aux1,v127);
// check overflow in high 16 bits
aux1 = _mm256_cmpeq_epi8(_mm256_inserti128_si256(vzero_epi8,_mm256_extracti128_si256(score,1),0),v127);
bb1_end = 2 - _mm256_testz_si256(aux1,v127);
// recalculate using 16-bit signed integer precision
for (bb1=bb1_start; bb1<bb1_end ; bb1++){
// init buffers
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm256_set1_epi16(0);
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm256_set1_epi16(0);
// set score to 0
score = _mm256_set1_epi16(0);
disp_2 = bb1*CPU_AVX2_INT16_VECTOR_LENGTH;
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*cpu_block_size;
dim1 = ptr_chunk_n[s]-disp_4;
dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1);
// calculate dim2
dim2 = dim1 / SEQ_LEN_MULT;
// init buffers
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm256_set1_epi16(0);
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm256_set1_epi16(0);
auxLastCol = _mm256_set1_epi16(0);
// calculate a[i] displacement
disp_1 = dim1*CPU_AVX2_INT8_VECTOR_LENGTH;
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm256_loadu_si256((__m256i *) (ptr_b + (disp_4+i)*CPU_AVX2_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm256_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm256_cmpgt_epi8(b_values,v15);
aux3 = _mm256_and_si256(aux2,vneg32);
aux4 = _mm256_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m256i *)(scoreProfile) + i;
for (j=0; j< BLOSUM_ROWS-1; j++) {
tmp = (__m128i*) (submat + j*BLOSUM_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
blosum_lo = _mm256_loadu2_m128i(&auxBlosum[0], &auxBlosum[0]);
blosum_hi = _mm256_loadu2_m128i(&auxBlosum[1], &auxBlosum[1]);
aux5 = _mm256_shuffle_epi8(blosum_lo,aux4);
aux6 = _mm256_shuffle_epi8(blosum_hi,aux1);
_mm256_store_si256(ptr_scoreProfile1+j*dim1,_mm256_or_si256(aux5,aux6));
}
_mm256_store_si256(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1,vzero_epi8);
}
for( i = 0; i < m[q]; i+=2){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
row2[0] = lastCol[i+1];
// calculate score profile displacement
ptr_scoreProfile1 = (__m256i*)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_2);
ptr_scoreProfile2 = (__m256i*)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_2);
// store maxRow in auxiliars
aux1 = maxRow[i];
aux2 = maxRow[i+1];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(SEQ_LEN_MULT)
for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) {
//calcuate the diagonal value
current = _mm256_adds_epi16(row1[j-1], _mm256_cvtepi8_epi16(_mm_loadu_si128((__m128i *) (ptr_scoreProfile1+(j-1)))));
// calculate current max value
current = _mm256_max_epi16(current, aux1);
current = _mm256_max_epi16(current, maxCol[j]);
current = _mm256_max_epi16(current, vzero_epi16);
// update maxRow and maxCol
aux1 = _mm256_subs_epi16(aux1, vextend_gap_epi16);
maxCol[j] = _mm256_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm256_subs_epi16(current, vopen_extend_gap_epi16);
aux1 = _mm256_max_epi16(aux1, aux0);
maxCol[j] = _mm256_max_epi16(maxCol[j], aux0);
// update row buffer
row2[j] = current;
// update max score
score = _mm256_max_epi16(score,current);
//calcuate the diagonal value
current = _mm256_adds_epi16(row2[j-1], _mm256_cvtepi8_epi16(_mm_loadu_si128((__m128i *) (ptr_scoreProfile2+(j-1)))));
// calculate current max value
current = _mm256_max_epi16(current, aux2);
current = _mm256_max_epi16(current, maxCol[j]);
current = _mm256_max_epi16(current, vzero_epi16);
// update maxRow and maxCol
aux2 = _mm256_subs_epi16(aux2, vextend_gap_epi16);
maxCol[j] = _mm256_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm256_subs_epi16(current, vopen_extend_gap_epi16);
aux2 = _mm256_max_epi16(aux2, aux0);
maxCol[j] = _mm256_max_epi16(maxCol[j], aux0);
// update row buffer
row3[j] = current;
// update max score
score = _mm256_max_epi16(score,current);
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = row2[dim1];
auxLastCol = current;
// swap buffers
tmp_row = row1;
row1 = row3;
row3 = tmp_row;
}
}
// store max value
aux = _mm256_extracti128_si256 (score,0);
_mm256_store_si256 (ptr_scores+bb1*2,_mm256_cvtepi16_epi32(aux));
aux = _mm256_extracti128_si256 (score,1);
_mm256_store_si256 (ptr_scores+bb1*2+1,_mm256_cvtepi16_epi32(aux));
// overflow detection
aux1 = _mm256_cmpeq_epi16(score,v32767);
overflow_flag = _mm256_testz_si256(aux1,v32767);
// if overflow
if (overflow_flag == 0){
// check overflow in low 16 bits
aux1 = _mm256_cmpeq_epi16(_mm256_inserti128_si256(vzero_epi16,_mm256_extracti128_si256(score,0),0),v32767);
bb2_start = _mm256_testz_si256(aux1,v32767);
// check overflow in high 16 bits
aux1 = _mm256_cmpeq_epi16(_mm256_inserti128_si256(vzero_epi16,_mm256_extracti128_si256(score,1),0),v32767);
bb2_end = 2 - _mm256_testz_si256(aux1,v32767);
// recalculate using 32-bit signed integer precision
for (bb2=bb2_start; bb2<bb2_end ; bb2++){
// init buffers
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm256_set1_epi32(0);
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm256_set1_epi32(0);
// set score to 0
score = _mm256_set1_epi32(0);
disp_3 = disp_2 + bb2*CPU_AVX2_INT32_VECTOR_LENGTH;
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*cpu_block_size;
dim1 = ptr_chunk_n[s]-disp_4;
dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1);
// calculate dim2
dim2 = dim1 / SEQ_LEN_MULT;
// init buffers
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm256_set1_epi32(0);
#pragma unroll(CPU_AVX2_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm256_set1_epi32(0);
auxLastCol = _mm256_set1_epi32(0);
// calculate a[i] displacement
disp_1 = dim1*CPU_AVX2_INT8_VECTOR_LENGTH;
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm256_loadu_si256((__m256i *) (ptr_b + (disp_4+i)*CPU_AVX2_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm256_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm256_cmpgt_epi8(b_values,v15);
aux3 = _mm256_and_si256(aux2,vneg32);
aux4 = _mm256_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m256i *)(scoreProfile) + i;
for (j=0; j< BLOSUM_ROWS-1; j++) {
tmp = (__m128i*) (submat + j*BLOSUM_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
blosum_lo = _mm256_loadu2_m128i(&auxBlosum[0], &auxBlosum[0]);
blosum_hi = _mm256_loadu2_m128i(&auxBlosum[1], &auxBlosum[1]);
aux5 = _mm256_shuffle_epi8(blosum_lo,aux4);
aux6 = _mm256_shuffle_epi8(blosum_hi,aux1);
_mm256_store_si256(ptr_scoreProfile1+j*dim1,_mm256_or_si256(aux5,aux6));
}
_mm256_store_si256(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1,vzero_epi8);
}
for( i = 0; i < m[q]; i+=2){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
row2[0] = lastCol[i+1];
// calculate score profile displacement
ptr_scoreProfile1 = (__m256i *)(scoreProfile+((unsigned int)(ptr_a[i]))*disp_1+disp_3);
ptr_scoreProfile2 = (__m256i *)(scoreProfile+((unsigned int)(ptr_a[i+1]))*disp_1+disp_3);
// store maxRow in auxiliars
aux1 = maxRow[i];
aux2 = maxRow[i+1];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(SEQ_LEN_MULT)
for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) {
//calcuate the diagonal value
current = _mm256_add_epi32(row1[j-1], _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile1+(j-1)))));
// calculate current max value
current = _mm256_max_epi32(current, aux1);
current = _mm256_max_epi32(current, maxCol[j]);
current = _mm256_max_epi32(current, vzero_epi32);
// update maxRow and maxCol
aux1 = _mm256_sub_epi32(aux1, vextend_gap_epi32);
maxCol[j] = _mm256_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm256_sub_epi32(current, vopen_extend_gap_epi32);
aux1 = _mm256_max_epi32(aux1, aux0);
maxCol[j] = _mm256_max_epi32(maxCol[j], aux0);
// update row buffer
row2[j] = current;
// update max score
score = _mm256_max_epi32(score,current);
//calcuate the diagonal value
current = _mm256_add_epi32(row2[j-1], _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile2+(j-1)))));
// calculate current max value
current = _mm256_max_epi32(current, aux2);
current = _mm256_max_epi32(current, maxCol[j]);
current = _mm256_max_epi32(current, vzero_epi32);
// update maxRow and maxCol
aux2 = _mm256_sub_epi32(aux2, vextend_gap_epi32);
maxCol[j] = _mm256_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm256_sub_epi32(current, vopen_extend_gap_epi32);
aux2 = _mm256_max_epi32(aux2, aux0);
maxCol[j] = _mm256_max_epi32(maxCol[j], aux0);
// update row buffer
row3[j] = current;
// update max score
score = _mm256_max_epi32(score,current);
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = row2[dim1];
auxLastCol = current;
// swap buffers
tmp_row = row1;
row1 = row3;
row3 = tmp_row;
}
// store max value
_mm256_store_si256 (ptr_scores+bb1*2+bb2,score);
}
}
}
}
}
}
}
cpu_chunks++;
}
}
if (tid < num_mics){
// de-allocate buffers in corresponding MIC
#pragma offload_transfer target(mic:tid) nocopy(submat: length(0) FREE) nocopy(queryProfiles: length(0) FREE) \
nocopy(a: length(0) FREE) nocopy(m:length(0) FREE) nocopy(a_disp: length(0) FREE) \
nocopy(b:length(0) FREE) nocopy(n:length(0) FREE) nocopy(b_disp: length(0) FREE) \
nocopy(mic_scores:length(0) FREE) \
nocopy(mic_row_ptrs, mic_maxCol_ptrs, mic_maxRow_ptrs, mic_lastCol_ptrs, mic_scoreProfile_ptrs: FREE)
_mm_free(mic_scores);
} else {
// de-allocate CPU buffers
if (cpu_chunks > 0){
for (i=0; i<cpu_threads-num_mics ; i++){
_mm_free(cpu_row1_ptrs[i]);
_mm_free(cpu_row2_ptrs[i]);
_mm_free(cpu_row3_ptrs[i]);
_mm_free(cpu_maxCol_ptrs[i]);
_mm_free(cpu_maxRow_ptrs[i]);
_mm_free(cpu_lastCol_ptrs[i]);
_mm_free(cpu_scoreProfile_ptrs[i]);
}
}
}
}
*workTime = dwalltime()-tick;
_mm_free(queryProfiles);
} |
dc.c | /*
!-------------------------------------------------------------------------!
! !
! N A S P A R A L L E L B E N C H M A R K S 3.3 !
! !
! O p e n M P V E R S I O N !
! !
! D C !
! !
!-------------------------------------------------------------------------!
! !
!    DC creates all specified data-cube views in parallel.                !
! Refer to NAS Technical Report 03-005 for details. !
! It calculates all groupbys in a top down manner using well known !
! heuristics and optimizations. !
! !
! Permission to use, copy, distribute and modify this software !
! for any purpose with or without fee is hereby granted. We !
! request, however, that all derived work reference the NAS !
! Parallel Benchmarks 3.3. This software is provided "as is" !
! without express or implied warranty. !
! !
! Information on NPB 3.3, including the technical report, the !
! original specifications, source code, results and information !
! on how to submit new results, is available at: !
! !
! http://www.nas.nasa.gov/Software/NPB/ !
! !
! Send comments or suggestions to npb@nas.nasa.gov !
! !
! NAS Parallel Benchmarks Group !
! NASA Ames Research Center !
! Mail Stop: T27A-1 !
! Moffett Field, CA 94035-1000 !
! !
! E-mail: npb@nas.nasa.gov !
! Fax: (650) 604-3957 !
! !
!-------------------------------------------------------------------------!
! Author: Michael Frumkin !
! Leonid Shabanov !
!-------------------------------------------------------------------------!
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "adc.h"
#include "macrodef.h"
#include "npbparams.h"
#ifdef UNIX
#include <sys/types.h>
#include <unistd.h>
#define MAX_TIMERS 64 /* NPB maximum timers */
void timer_clear(int);
void timer_start(int);
void timer_stop(int);
double timer_read(int);
#endif
void c_print_results( char *name,
char clss,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags );
void initADCpar(ADC_PAR *par);
int ParseParFile(char* parfname, ADC_PAR *par);
int GenerateADC(ADC_PAR *par);
void ShowADCPar(ADC_PAR *par);
int32 DC(ADC_VIEW_PARS *adcpp);
int Verify(long long int checksum,ADC_VIEW_PARS *adcpp);
#define BlockSize 1024
int main ( int argc, char * argv[] )
{
   /*
    * DC benchmark driver.
    * Usage: dc <memory-limit> [parameter-file]
    * argv[1] (optional) is the memory budget in bytes for the rb-tree;
    * argv[2] (optional) is an ADC parameter file.  With no/partial
    * arguments, compiled-in defaults from npbparams.h are used.
    */
   ADC_PAR *parp;
   ADC_VIEW_PARS *adcpp;
   int32 retCode;

   fprintf(stdout,"\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - DC Benchmark\n\n" );
   if(argc!=3){
      fprintf(stdout," No Paramter file. Using compiled defaults\n");
   }
   /* first argument, when present, must be numeric (the memory limit) */
   if(argc>3 || (argc>1 && !isdigit(argv[1][0]))){
      fprintf(stderr,"Usage: <program name> <amount of memory>\n");
      fprintf(stderr,"       <file of parameters>\n");
      fprintf(stderr,"Example: bin/dc.S 1000000 DC/ADC.par\n");
      fprintf(stderr,"The last argument, (a parameter file) can be skipped\n");
      exit(1);
   }
   /* NOTE: PutErrMsg is a macro from macrodef.h; no semicolon needed */
   if( !(parp = (ADC_PAR*) malloc(sizeof(ADC_PAR)))
     ||!(adcpp = (ADC_VIEW_PARS*) malloc(sizeof(ADC_VIEW_PARS)))){
      PutErrMsg("main: malloc failed")
      exit(1);
   }
   initADCpar(parp);
   parp->clss=CLASS;
   if(argc!=3){
      /* compiled-in defaults (attrnum/input_tuples come from npbparams.h) */
      parp->dim=attrnum;
      parp->tuplenum=input_tuples;
   }else if( (argc==3)&&(!ParseParFile(argv[2], parp))) {
      PutErrMsg("main.ParseParFile failed")
      exit(1);
   }
   ShowADCPar(parp);
   /* generate the input data cube on disk */
   if(!GenerateADC(parp)) {
      PutErrMsg("main.GenerateAdc failed")
      exit(1);
   }

   /* copy generation parameters into the view-computation parameters */
   adcpp->ndid = parp->ndid;
   adcpp->clss = parp->clss;
   adcpp->nd = parp->dim;
   adcpp->nm = parp->mnum;
   adcpp->nTasks = 1;

   if(argc>=2)
      adcpp->memoryLimit = atoi(argv[1]);
   else
      adcpp->memoryLimit = 0;
   if(adcpp->memoryLimit <= 0){
      /* size of rb-tree with tuplenum nodes */
      adcpp->memoryLimit = parp->tuplenum*(50+5*parp->dim);
      fprintf(stdout,"Estimated rb-tree size = %d \n", adcpp->memoryLimit);
   }
   adcpp->nInputRecs = parp->tuplenum;
   strcpy(adcpp->adcName, parp->filename);
   strcpy(adcpp->adcInpFileName, parp->filename);

   /* run the benchmark proper */
   if((retCode=DC(adcpp))) {
      PutErrMsg("main.DC failed")
      fprintf(stderr, "main.ParRun failed: retcode = %d\n", retCode);
      exit(1);
   }

   if(parp) { free(parp); parp = 0; }
   if(adcpp) { free(adcpp); adcpp = 0; }
   return 0;
}
int32 CloseAdcView(ADC_VIEW_CNTL *adccntl);
int32 PartitionCube(ADC_VIEW_CNTL *avp);
ADC_VIEW_CNTL *NewAdcViewCntl(ADC_VIEW_PARS *adcpp, uint32 pnum);
int32 ComputeGivenGroupbys(ADC_VIEW_CNTL *adccntl);
int32 DC(ADC_VIEW_PARS *adcpp) {
   /*
    * Compute all data-cube views, one task per OpenMP thread, then
    * verify and report.  Returns ADC_OK on success, non-zero on a
    * fatal allocation failure.
    */
   int32 itsk=0;
   double t_total=0.0;
   int verified;

   /* per-run accumulator shared by all threads (updated under critical) */
   typedef struct {
      int verificationFailed;
      uint32 totalViewTuples;
      uint64 totalViewSizesInBytes;
      uint32 totalNumberOfMadeViews;
      uint64 checksum;
      double tm_max;
   } PAR_VIEW_ST;

   PAR_VIEW_ST *pvstp;

   pvstp = (PAR_VIEW_ST*) malloc(sizeof(PAR_VIEW_ST));
   if (!pvstp) {                       /* bug fix: result was dereferenced unchecked */
      PutErrMsg("DC: malloc failed")
      return 1;
   }
   pvstp->verificationFailed = 0;
   pvstp->totalViewTuples = 0;
   pvstp->totalViewSizesInBytes = 0;
   pvstp->totalNumberOfMadeViews = 0;
   pvstp->checksum = 0;
   pvstp->tm_max = 0.0;                /* bug fix: was read uninitialized below */

#ifdef _OPENMP
   adcpp->nTasks=omp_get_max_threads();
   fprintf(stdout,"\nNumber of available threads: %d\n", adcpp->nTasks);
   if (adcpp->nTasks > MAX_NUMBER_OF_TASKS) {
      adcpp->nTasks = MAX_NUMBER_OF_TASKS;
      fprintf(stdout,"Warning: Maximum number of tasks reached: %d\n",
              adcpp->nTasks);
   }
#pragma omp parallel shared(pvstp) private(itsk)
#endif
   {
      double tm0=0;
      int itimer=0;
      ADC_VIEW_CNTL *adccntlp;
#ifdef _OPENMP
      itsk=omp_get_thread_num();
#endif
      adccntlp = NewAdcViewCntl(adcpp, itsk);
      if (!adccntlp) {
         /* bug fix: previously dereferenced this NULL pointer; record the
            failure for this task and skip its work instead */
         PutErrMsg("ParRun.NewAdcViewCntl: returned NULL")
#ifdef _OPENMP
#pragma omp critical
#endif
         pvstp->verificationFailed += 1;
      } else {
         adccntlp->verificationFailed = 0;
         if (adccntlp->retCode!=0) {
            fprintf(stderr,
                    "DC.NewAdcViewCntl: return code = %d\n",
                    adccntlp->retCode);
         }
         if (!adccntlp->verificationFailed) {
            if( PartitionCube(adccntlp) ) {
               PutErrMsg("DC.PartitionCube failed");
            }
            timer_clear(itimer);
            timer_start(itimer);
            parmacs_roi_begin();
            if( ComputeGivenGroupbys(adccntlp) ) {
               PutErrMsg("DC.ComputeGivenGroupbys failed");
            }
            parmacs_roi_end();
            timer_stop(itimer);
            tm0 = timer_read(itimer);
         }
         /* fold this task's results into the shared accumulator */
#ifdef _OPENMP
#pragma omp critical
#endif
         {
            if(pvstp->tm_max<tm0) pvstp->tm_max=tm0;
            pvstp->verificationFailed += adccntlp->verificationFailed;
            if (!adccntlp->verificationFailed) {
               pvstp->totalNumberOfMadeViews += adccntlp->numberOfMadeViews;
               pvstp->totalViewSizesInBytes += adccntlp->totalViewFileSize;
               pvstp->totalViewTuples += adccntlp->totalOfViewRows;
               pvstp->checksum += adccntlp->totchs[0];
            }
         }
         if(CloseAdcView(adccntlp)) {
            PutErrMsg("ParRun.CloseAdcView: is failed");
            adccntlp->verificationFailed = 1;
         }
      }
   } /* omp parallel */

   t_total=pvstp->tm_max;
   /* verified: -1 unknown class, 1 passed, 0 failed */
   pvstp->verificationFailed=Verify(pvstp->checksum,adcpp);
   verified = (pvstp->verificationFailed == -1)? -1 :
              (pvstp->verificationFailed ==  0)?  1 : 0;

   fprintf(stdout,"\n*** DC Benchmark Results:\n");
   fprintf(stdout," Benchmark Time   = %20.3f\n", t_total);
   fprintf(stdout," Input Tuples     = %12d\n", (int) adcpp->nInputRecs);
   fprintf(stdout," Number of Views  = %12d\n",
           (int) pvstp->totalNumberOfMadeViews);
   fprintf(stdout," Number of Tasks  = %12d\n", (int) adcpp->nTasks);
   fprintf(stdout," Tuples Generated = %20.0f\n",
           (double) pvstp->totalViewTuples);
   fprintf(stdout," Tuples/s         = %20.2f\n",
           (double) pvstp->totalViewTuples / t_total);
   fprintf(stdout," Checksum         = %20.12e\n", (double) pvstp->checksum);
   if (pvstp->verificationFailed)
      fprintf(stdout, " Verification failed\n");
   c_print_results("DC",
                   adcpp->clss,
                   (int)adcpp->nInputRecs,
                   0,
                   0,
                   1,
                   t_total,
                   (double) pvstp->totalViewTuples * 1.e-6 / t_total,
                   "Tuples generated",
                   verified,
                   NPBVERSION,
                   COMPILETIME,
                   CC,
                   CLINK,
                   C_LIB,
                   C_INC,
                   CFLAGS,
                   CLINKFLAGS);
   free(pvstp);                        /* bug fix: was leaked */
   return ADC_OK;
}
/* Reference checksums per benchmark class; W/A/B are split into
   lo + 1000000*hi parts (combined in Verify). */
long long checksumS=464620213;
long long checksumWlo=434318;
long long checksumWhi=1401796;
long long checksumAlo=178042;
long long checksumAhi=7141688;
long long checksumBlo=700453;
long long checksumBhi=9348365;
/*
 * Compare the computed checksum against the reference value for the
 * benchmark class.  Returns 0 on match, 1 on mismatch, -1 for an
 * unrecognized class (CLASS U).
 */
int Verify(long long int checksum,ADC_VIEW_PARS *adcpp){
  long long expected;

  switch(adcpp->clss){
    case 'S': expected = checksumS;                          break;
    case 'W': expected = checksumWlo + 1000000*checksumWhi;  break;
    case 'A': expected = checksumAlo + 1000000*checksumAhi;  break;
    case 'B': expected = checksumBlo + 1000000*checksumBhi;  break;
    default:  return -1; /* CLASS U */
  }
  return (checksum == expected) ? 0 : 1;
}
|
pr66633-3.c | /* PR middle-end/66633 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -O1" } */
void baz (int (*) ());
void
foo (void)
{
  int i;
  /* GNU C nested functions; bar reads the enclosing frame's i.  */
  auto int bar (void) { return i; }
  auto void bar2 (void)
  {
    /* Taking a nested function's address inside an OpenMP parallel
       region is what PR middle-end/66633 exercises.  */
    #pragma omp parallel
    baz (bar);
  }
  bar2 ();
}
|
net_sha1_fmt_plug.c | /* Cracker for "Keyed SHA1" network authentication hashes.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Added linkage to dynamic (type dynamic_40) for any salt 230 bytes or less,
* by Jim Fougeron. Any salts > 239 bytes will still be handled by this full
* format. dynamic is limited to 256 bytes, which 'should' get us 240 bytes
* of salt. I think we might be able to get 239 bytes (due to a few issues).
* 240 byte salts fail. So, for peace of mind, I am limiting to 230 byte salts
* within dynamic.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_netsha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_netsha1);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2048 // XXX
#endif
#include "arch.h"
#include "formats.h"
#include "dynamic.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "net-sha1"
#define FORMAT_NAME "\"Keyed SHA1\" BFD"
#define FORMAT_TAG "$netsha1$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 20 // get this right ;)
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define HEXCHARS "0123456789abcdef"
#define MAX_SALT_LEN 1024
static struct fmt_tests tests[] = {
/* Real hashes from Cisco routers ;) */
{"$netsha1$20440a340000000100000000000f4240000f424000000000051c010000000001$709d3307304d790f58bf0a3cefd783b438408996", "password12345"},
{"$netsha1$20440a340000000100000000000f4240000f424000000000051c010000000002$94bce4d9084199508669b39f044064082a093de3", "password12345"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void get_ptr();
static void init(struct fmt_main *self);
#define MAGIC 0xfe5aa5ef
static struct custom_salt {
ARCH_WORD_32 magic;
int length;
unsigned char salt[MAX_SALT_LEN]; // fixed size, but should be OK
} *cur_salt;
static int dyna_salt_seen=0;
static char Conv_Buf[300]; // max salt length we will pass to dyna is 230. 300 is MORE than enough.
static struct fmt_main *pDynamicFmt, *pNetSha1_Dyna;
/* this function converts a 'native' net-sha1 signature string into a $dynamic_40$ syntax string */
static char *Convert(char *Buf, char *ciphertext)
{
	/* Rewrite a "$netsha1$<salt-hex>$<hash-hex>" string into the
	   "$dynamic_40$<hash>$HEX<...>" form used by the thin dynamic_40
	   link.  Returns "*" (invalid) for malformed input; returns the
	   input unchanged if it is already in dynamic format. */
	char *cp, *cp2;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
		return ciphertext;
	/* cp -> '$' terminating the "$netsha1" tag, cp2 -> '$' before the hash */
	cp = strchr(&ciphertext[2], '$');
	if (!cp)
		return "*";
	cp2 = strchr(&cp[1], '$');
	if (!cp2)
		return "*";
	/* NOTE(review): the %*.*s span starts at cp (the '$' separator), so the
	   copied field is '$'+salt, cp2-cp bytes long — presumably what the
	   dynamic_40 HEX salt parser expects; verify against dynamic.c. */
	snprintf(Buf, sizeof(Conv_Buf), "$dynamic_40$%s$HEX%*.*s", &cp2[1], (int)(cp2-cp), (int)(cp2-cp), cp);
	return Buf;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
	/* Accept "$netsha1$<salt-hex>$<40-hex-hash>"; anything whose final
	   field is not exactly a 40-hex SHA-1 is offered to the linked
	   dynamic_40 format instead. */
	char *p, *q = NULL;
	int len;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = strrchr(ciphertext, '$');
	if (!q)
		return 0;
	q = q + 1;                       /* q -> hash field after the last '$' */
	/* salt (between p and q-1) must fit MAX_SALT_LEN bytes of binary */
	if ((q - p - 1) > MAX_SALT_LEN * 2)
		return 0;
	len = strspn(q, HEXCHARS);
	if (len != BINARY_SIZE * 2 || len != strlen(q)) {
		get_ptr();
		return pDynamicFmt->methods.valid(ciphertext, pDynamicFmt);
	}
	/* salt portion must be all hex */
	if (strspn(p, HEXCHARS) != q - p - 1)
		return 0;
	return 1;
}
static void *get_salt(char *ciphertext)
{
	/* Decode the hex salt between the tag and the final '$'.  Salts
	   shorter than 230 bytes are delegated to dynamic_40: its salt blob
	   is copied over cs, so cs.magic stays clear and later calls take
	   the dynamic path.  Longer salts use this format's own struct. */
	static struct custom_salt cs;
	char *orig_ct = ciphertext;
	int i, len;

	memset(&cs, 0, sizeof(cs));
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	len = (strrchr(ciphertext, '$') - ciphertext) / 2;
	for (i = 0; i < len; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	if (len < 230) {
		// return our memset buffer (putting the dyna salt pointer into it).
		// This keeps the 'pre-cleaned' salt() warning from hitting this format.
		//return pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct));
		memcpy((char*)(&cs), pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct)), pDynamicFmt->params.salt_size);
		dyna_salt_seen=1;
		return &cs;
	}
	cs.magic = MAGIC;
	cs.length = len;
	return &cs;
}
static void *get_binary(char *ciphertext)
{
	/* Decode the 2*BINARY_SIZE hex chars after the last '$' into a
	   static 20-byte buffer.  Dynamic-format strings are decoded by the
	   linked format (its binary is smaller; the rest is zero-padded). */
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;         /* forces word alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext)) {
		unsigned char *cp = pDynamicFmt->methods.binary(ciphertext);
		memset(out, 0, sizeof(buf.c));
		memcpy(out, cp, pDynamicFmt->params.binary_size); // binary size is 16
		return out;
	}
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Partial-hash bucket functions over the first 32-bit word of the computed
   SHA-1; when the active salt is a dynamic_40 salt (magic mismatch) the
   call is forwarded to the linked dynamic format. */
static int get_hash_0(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[0](index); return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[1](index); return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[2](index); return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[3](index); return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[4](index); return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[5](index); return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[6](index); return crypt_out[index][0] & 0x7ffffff; }
static void set_salt(void *salt)
{
	/* Remember the active salt; dynamic_40 salts (no MAGIC marker) are
	   also forwarded to the linked dynamic format. */
	cur_salt = (struct custom_salt *)salt;
	get_ptr();
	if (cur_salt->magic != MAGIC) {
		pDynamicFmt->methods.set_salt(salt);
	}
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
	/* Compute SHA1(salt || key) for every queued candidate.  Dynamic_40
	   salts are computed by the linked dynamic format instead. */
	int count = *pcount;
	int index = 0;

	if (cur_salt->magic != MAGIC) {
		return pDynamicFmt->methods.crypt_all(pcount, salt);
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->salt, cur_salt->length);
		/* always hashes the full PLAINTEXT_LENGTH buffer: keys are
		   zero-padded by netsha1_set_key, not NUL-terminated strings */
		SHA1_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}
static int cmp_all(void *binary, int count)
{
	/* Quick scan: does any computed hash share its first 32-bit word
	   with the target binary?  Dynamic_40 salts are delegated. */
	int i;

	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.cmp_all(binary, count);

	for (i = 0; i < count; i++) {
		if (crypt_out[i][0] == ((ARCH_WORD_32*)binary)[0])
			return 1;
	}
	return 0;
}
static int cmp_one(void *binary, int index)
{
	/* Full 20-byte comparison against one computed hash; dynamic_40
	   salts are delegated to the linked format. */
	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.cmp_one(binary, index);

	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
static int cmp_exact(char *source, int index)
{
	/* cmp_one already compared the full binary; nothing more to check. */
	return 1;
}
static void netsha1_set_key(char *key, int index)
{
	/* Mirror the candidate into the dynamic format once a dynamic salt
	   has been seen, so either crypt path has the key available. */
	if (dyna_salt_seen)
		pDynamicFmt->methods.set_key(key, index);
	/* strncpy will pad with zeros, which is needed: crypt_all hashes the
	   whole fixed-size buffer, not a C string */
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
static char *get_key(int index)
{
	/* Return the stored candidate (zero-padded, PLAINTEXT_LENGTH max). */
	return saved_key[index];
}
static char *prepare(char *fields[10], struct fmt_main *self) {
	/* Prepend the "$netsha1$" tag to otherwise-valid bare hashes so the
	   rest of the format sees canonical input; strings already in
	   dynamic format pass through untouched. */
	static char buf[sizeof(cur_salt->salt)*2+TAG_LENGTH+1];
	char *hash = fields[1];

	if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) && valid(hash, self)) {
		get_ptr();
		if (text_in_dynamic_format_already(pDynamicFmt, hash))
			return hash;
		/* bug fix: was sprintf(); 'hash' is externally controlled, so an
		   overlong line could overflow buf.  snprintf bounds the write. */
		snprintf(buf, sizeof(buf), "%s%s", FORMAT_TAG, hash);
		return buf;
	}
	return hash;
}
/* Format descriptor registered with John the Ripper.  NOTE: min/max
   keys-per-crypt and algorithm_name are patched at runtime by get_ptr()
   once the dynamic_40 link is established. */
struct fmt_main fmt_netsha1 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, { /* methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		netsha1_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
static void get_ptr() {
	/* Lazily create the thin link to the generic dynamic_40 format.
	   Also adopts its min/max keys-per-crypt and extends the advertised
	   algorithm name.  Idempotent: guarded by pDynamicFmt. */
	if (!pDynamicFmt) {
		char *Buf;

		/* private copy of this format for the dynamic link to mutate */
		pNetSha1_Dyna = mem_alloc_tiny(sizeof(fmt_netsha1), 16);
		memcpy(pNetSha1_Dyna, &fmt_netsha1, sizeof(fmt_netsha1));

		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetSha1_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-sha1", 0);
		fmt_netsha1.params.min_keys_per_crypt = pDynamicFmt->params.min_keys_per_crypt;
		fmt_netsha1.params.max_keys_per_crypt = pDynamicFmt->params.max_keys_per_crypt;
		/* " or " is 4 chars, hence the +4 below */
		Buf = mem_alloc_tiny(strlen(fmt_netsha1.params.algorithm_name) + 4 + strlen("dynamic_40") + 1, 1);
		sprintf(Buf, "%s or %s", fmt_netsha1.params.algorithm_name, "dynamic_40");
		fmt_netsha1.params.algorithm_name = Buf;
		//pDynamicFmt->methods.init(pDynamicFmt);
	}
}
static void init(struct fmt_main *self)
{
	// We have to allocate our dyna_40 object first, because we get 'modified' min/max counts from there.
	get_ptr();
	if (self->private.initialized == 0) {
		/* NOTE(review): same link call as get_ptr() but with final arg 1 —
		   presumably this also initializes the dynamic format; confirm
		   against dynamic_THIN_FORMAT_LINK's signature. */
		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetSha1_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-sha1", 1);
		self->private.initialized = 1;
	}
	/* per-candidate buffers, sized to the (possibly widened) key count */
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
#endif /* plugin stanza */
|
seriesSum.c | #include <stdio.h>
#include <math.h>
#include <omp.h>
int main(){
    /* Compute x^1 + x^2 + ... + x^n in parallel with an OpenMP reduction. */
    int x, n, i, sum = 0;   /* bug fix: was 1, adding a spurious constant
                               term the documented series does not have */
    omp_set_dynamic(0);
    printf("Enter the values\nx = ");
    if (scanf("%d", &x) != 1)   /* reject non-numeric input */
        return 1;
    printf("N = ");
    if (scanf("%d", &n) != 1)
        return 1;
    int m = omp_get_num_procs();
    omp_set_num_threads(m);
    // x^1 + x^2 + x^3 + ... + x^n
    #pragma omp parallel for reduction(+:sum)
    for(i = 1; i <= n; i++){
        sum += pow(x, i);   /* pow() result truncated to int */
    }
    printf("Sum = %d\n", sum);
    return 0;
}
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /*
    Reorder the six external affine arguments sx,ry,rx,sy,tx,ty into the
    internal coefficient order c0,c2,c4,c1,c3,c5.  Slots 0 and 5 are
    already in place and remain unchanged.
  */
  const double ry=affine[1];
  const double rx=affine[2];
  const double sy=affine[3];
  const double tx=affine[4];

  affine[1]=rx;
  affine[2]=tx;
  affine[3]=ry;
  affine[4]=sy;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /*
    Inverse of AffineArgsToCoefficients: reorder internal coefficients
    c0..c5 back into the external argument order sx,ry,rx,sy,tx,ty.
    Slots 0 and 5 remain unchanged.
  */
  const double c1=coeff[1];
  const double c2=coeff[2];
  const double c3=coeff[3];
  const double c4=coeff[4];

  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /*
    Invert a 6-coefficient affine transform (x'=c0*x+c1*y+c2,
    y'=c3*x+c4*y+c5).  From "Digital Image Warping" by George Wolberg,
    page 50.  PerceptibleReciprocal() guards against a singular matrix.
  */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  /* linear part */
  inverse[0]=coeff[4]*det;
  inverse[1]=(-coeff[1])*det;
  inverse[3]=(-coeff[3])*det;
  inverse[4]=coeff[0]*det;
  /* translation part */
  inverse[2]=(coeff[1]*coeff[5]-coeff[2]*coeff[4])*det;
  inverse[5]=(coeff[2]*coeff[3]-coeff[0]*coeff[5])*det;
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /*
    Invert an 8-coefficient perspective transform (adjugate divided by
    the upper-left determinant).  From "Digital Image Warping" by
    George Wolberg, page 53.
  */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=(coeff[4]-coeff[7]*coeff[5])*det;
  inverse[3]=(coeff[6]*coeff[5]-coeff[3])*det;
  inverse[6]=(coeff[3]*coeff[7]-coeff[6]*coeff[4])*det;
  inverse[1]=(coeff[7]*coeff[2]-coeff[1])*det;
  inverse[4]=(coeff[0]-coeff[6]*coeff[2])*det;
  inverse[7]=(coeff[6]*coeff[1]-coeff[0]*coeff[7])*det;
  inverse[2]=(coeff[1]*coeff[5]-coeff[4]*coeff[2])*det;
  inverse[5]=(coeff[3]*coeff[2]-coeff[0]*coeff[5])*det;
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 number_valuesal polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /* Return the number of terms for a 2d polynomial of the given order.
     Valid orders are the integers 1..5 plus 1.5 (bilinear); anything
     else yields 0.  bug fix: the non-integer test compared the signed
     difference (order-1.5) > MagickEpsilon, which wrongly accepted any
     non-integer order below 1.5 (e.g. 1.2); use the absolute distance. */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /*
    Value of 2d polynomial basis term n at (x,y).  Terms are ordered
    1, x, y, x*y, x^2, y^2, then all cubic terms, etc. (see the term
    table in the comment block above).  Out-of-range n yields 0.
  */
  static const int xp[21] = {0,1,0,1,2,0,3,2,1,0,4,3,2,1,0,5,4,3,2,1,0};
  static const int yp[21] = {0,0,1,1,0,2,0,1,2,3,0,1,2,3,4,0,1,2,3,4,5};
  double result;
  int k;

  if (n < 0 || n > 20)
    return( 0 ); /* should never happen */
  result=1.0;
  for (k=0; k < xp[n]; k++)
    result*=x;
  for (k=0; k < yp[n]; k++)
    result*=y;
  return(result);
}
static const char *poly_basis_str(ssize_t n)
{
  /* Return the human-readable multiplier string for the n'th polynomial
     basis term ("ii"/"jj" being the x/y variables); used when printing
     the fitted polynomial equations. */
  static const char *term_str[21] = {
    "",                                        /* constant */
    "*ii", "*jj",                              /* affine    order=1   terms=3 */
    "*ii*jj",                                  /* bilinear  order=1.5 terms=4 */
    "*ii*ii", "*jj*jj",                        /* quadratic order=2   terms=6 */
    "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj", /* cubic order=3 */
    "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
    "*ii*jj*jj*jj", "*jj*jj*jj*jj",            /* quartic   order=4   terms=15 */
    "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
    "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj" /* quintic order=5 */
  };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" ); /* should never happen */
  return(term_str[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* Return the x-derivative shape of the n'th polynomial basis term.
     NOTE(review): constant multipliers are omitted (e.g. the x^3 term
     yields x*x, not 3*x*x) — matching the original; presumably the
     caller only needs the functional shape.  A {-1,-1} entry marks a
     pure-y term whose x derivative is identically zero. */
  static const int power[21][2] = {
    {-1,-1},                                   /* constant */
    {0,0}, {-1,-1},                            /* affine    order=1   terms=3 */
    {0,1},                                     /* bilinear  order=1.5 terms=4 */
    {1,0}, {-1,-1},                            /* quadratic order=2   terms=6 */
    {2,0}, {1,1}, {0,2}, {-1,-1},              /* cubic     order=3   terms=10 */
    {3,0}, {2,1}, {1,2}, {0,3}, {-1,-1},       /* quartic   order=4   terms=15 */
    {4,0}, {3,1}, {2,2}, {1,3}, {0,4}, {-1,-1} /* quintic   order=5   terms=21 */
  };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20) || (power[n][0] < 0))
    return(0.0);  /* zero derivative, or out of range */
  /* x factors first, then y factors - same evaluation order as original */
  product=1.0;
  for (k=0; k < power[n][0]; k++)
    product*=x;
  for (k=0; k < power[n][1]; k++)
    product*=y;
  return(product);
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* Return the y-derivative shape of the n'th polynomial basis term
     (constant multipliers omitted, as in poly_basis_dx).
     For n >= 6 the y derivative of term n has the same shape as the
     x derivative of term n-1 ("weird but true") -- the only reason this
     does not also hold for the quadratic terms is the re-arrangement of
     terms that squeezes the bilinear x*y term in ahead of them. */
  if (n <= 2)
    return((n == 2) ? 1.0 : 0.0); /* constant & affine: only d(y)/dy = 1 */
  if (n == 3)
    return( x );                  /* bilinear x*y term */
  if (n == 4)
    return( 0.0 );                /* x*x has no y dependence */
  if (n == 5)
    return( y );                  /* quadratic y*y term */
  return(poly_basis_dx(n-1,x,y)); /* cubic and above: delegate */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    coefficients[6];

  /*
    Apply the affine matrix by delegating to the generic distortion
    engine with the AffineProjection method (forward-mapped affine).
  */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* pack the matrix in the argument order AffineProjection expects:
     sx, rx, ry, sy, tx, ty */
  coefficients[0]=affine_matrix->sx;
  coefficients[1]=affine_matrix->rx;
  coefficients[2]=affine_matrix->ry;
  coefficients[3]=affine_matrix->sy;
  coefficients[4]=affine_matrix->tx;
  coefficients[5]=affine_matrix->ty;
  return(DistortImage(image,AffineProjectionDistortion,6,coefficients,
    MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using an EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image: implement the resize as an affine distortion so the
    result is EWA area-resampled, then crop back to exact whole-pixel size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  (void) memset(distort_args,0,sizeof(distort_args));
  /* affine control points: X axis maps image->columns -> columns ... */
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  /* ... and Y axis maps image->rows -> rows */
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  /* transparent virtual pixels so edges are not contaminated by VP color */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it to mask
        off the virtual pixels during the distortion.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.

        First distort the alpha channel separately.
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* fix leak: release the already-distorted alpha image */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resized image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop the (deliberately
    over-sized) distorted image to the exact requested whole-pixel size,
    and clear the virtual canvas (page) geometry.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to an Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
PixelInfo
invalid; /* the color to assign when distort result is invalid */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
register ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* The user provided a 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel color to assign to distorted image */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty triangles left
% over from shearing the image are filled with the background color defined
% by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: reduce to the range (-45,45] while counting the
    number of quadrant (90 degree) turns so a cheap integral rotation can
    handle the bulk of the rotation.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Fast path: if the residual rotation is (numerically) zero, a pure
    multiple-of-90-degree rotation needs no resampling at all.
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General rotation: clone the source, expose the background color through
    the virtual pixel method, and delegate to the SRT distortion with the
    single argument 'degrees'.
    BUG FIX: the argument below read "°rees" (an HTML-entity encoding
    artifact of "&deg"); it must be the address of 'degrees'.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  /*
    Interpolate the colors given at the control points across the whole
    image.  'arguments' is a flat array of x,y,color_values tuples, with the
    color values normalized to [0,1]; the number of color values per point
    depends on which channels of 'image' are updatable (counted below).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Determine number of color values needed per control point */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    number_colors++;
  /*
    Convert input arguments into mapping coefficients; in this case we are
    mapping (distorting) colors, rather than coordinates.
  */
  { DistortMethod
      distort_method;

    distort_method=(DistortMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method.  This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5; /* sqrt() the squared distance for inverse */
  }
  /* Verbose output: echo an equivalent -fx expression for simple methods */
  if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        /* the comma operator advances 'x' past the 3 coefficients consumed
           for each channel that is reported */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        /* 4 coefficients per channel for the bilinear form */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }
  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy is: if the storage class could change then
   * clone the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel; /* pixel to assign to distorted image */

      register ssize_t
        i;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      /* NOTE(review): image and sparse_image have identical dimensions
         (sparse_image is a clone of image, above), so iterating to
         image->columns covers the whole row of sparse_image */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        /* seed 'pixel' from the current pixel so channels that are NOT
           updatable keep their original values */
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            register ssize_t x=0;
            /* linear (affine) gradient: 3 coefficients per channel; the
               comma operator advances 'x' for each updated channel */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            register ssize_t x=0;
            /* bilinear gradient: 4 coefficients per channel */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;
            /* zero only the channels we will accumulate into */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            /* each control point occupies 2 coordinates + its colors */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                  + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              /* clamp so a pixel on (or very near) a control point does
                 not produce an infinite weight */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait != UndefinedPixelTrait))
                pixel.alpha += arguments[x++]*weight;
              denominator += weight;
            }
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;
            /*
              Just use the closest control point you can find!
              (Manhattan / taxicab distance metric)
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                  + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;
            /*
              Just use the closest control point you can find!
              (squared Euclidean distance metric -- Voronoi cells)
            */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                  + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image;
           denormalize from [0,1] to QuantumRange and clamp */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
        SetPixelViaPixelInfo(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          /* serialize progress updates across OpenMP threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
test.c |
#include <stdlib.h>
#include <stdio.h>
#include "omp.h"
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define N 10
/*
 * Regression tests for "#pragma omp taskloop simd" clauses inside a
 * target region.  Each test initializes the input arrays, runs one
 * taskloop-simd variant on the device, recomputes the expected result
 * on the host, and compares element-wise.  If the target region runs on
 * the initial (host) device, the tests are skipped and the expected
 * number of success lines is emitted via DUMP_SUCCESS instead.
 */
int main()
{
  double a[N], a_h[N];
  double b[N], c[N];
  int fail = 0;

  check_offloading();

  long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }

  // taskloop is only implemented on the gpu
  if (!cpuExec) {
    // Test: basic with shared
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd shared(a)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: if clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd shared(a) if(0) //undeferred execution of task
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: private clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
    int myId = -1;
#pragma omp target map(tofrom:a) map(to:b,c)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd shared(a) private(myId)
      for (int i = 0; i < N; i++) {
        myId = omp_get_thread_num();
        a[i] += b[i] + c[i] + myId;
      }
    }
    // myId == 0 for all iterations because we execute the entire loop on a single thread (the master)
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i] + 0;
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: firstprivate clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
    myId = -1;
#pragma omp target map(tofrom:a) map(to:b,c)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd shared(a) firstprivate(myId)
      for (int i = 0; i < N; i++) {
        myId += omp_get_thread_num();
        a[i] += b[i] + c[i] + myId;
      }
    }
    // myId == -1 for all iterations because we execute the entire loop on a single thread (the master)
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i] + (-1);
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: lastprivate clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
    double lp = -1;
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
      // lastprivate must name lp (the variable assigned in the loop and
      // checked below); listing myId here left lp shared and racy.
#pragma omp taskloop simd shared(a) lastprivate(lp)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
        lp = a[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (lp != a[N-1]) {
      printf("Latpriv Error device = %lf, host = %lf\n", lp, a_h[N-1]);
      fail = 1;
    }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: default clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd default(shared)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: grainsize
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd grainsize(3)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: num_tasks clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd num_tasks(5)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: collapse clause
    fail = 0;
    int ma[N][N], mb[N][N], mc[N][N];
    for (int i = 0; i < N; i++)
      for (int j = 0; j < N; j++) {
        ma[i][j] = -1;
        mb[i][j] = i;
        mc[i][j] = 2*i;
      }
#pragma omp target map(tofrom: ma) map(to: mb,mc)
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd collapse(2)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < N; j++)
        ma[i][j] += mb[i][j] + mc[i][j];
    for (int i = 0; i < N; i++)
      for (int j = 0; j < N; j++)
        if (ma[i][j] != (-1 + i + 2*i)) {
          printf("Error at %d: device = %d, host = %d\n", i, ma[i][j], (-1 + i + 2*i));
          fail = 1;
        }
    if (fail)
      printf ("Failed\n");
    else
      printf("Succeeded\n");

    // Test: final clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd final(1)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: priority clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd priority(10)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: untied clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd untied
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: mergeable clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd mergeable
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: nogroup clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd nogroup
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: safelen clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd safelen(2) // taskloop is sequentialized: safelen can be 0
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // Test: simdlen clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd simdlen(32)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");

    // compiler assert
#if 0
    // Test: linear clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
    int l = 0;
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd linear(l:2)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i] + l;
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i] + (i*2);
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");
#endif

    // Test: aligned clause
    fail = 0;
    for (int i = 0; i < N; i++) {
      a[i] = a_h[i] = 0;
      b[i] = i;
      c[i] = i - 7;
    }
#pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd aligned(a,b,c)
      for (int i = 0; i < N; i++) {
        a[i] += b[i] + c[i];
      }
    }
    for (int i = 0; i < N; i++)
      a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
      if (a[i] != a_h[i]) {
        printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
        fail = 1;
      }
    if (fail)
      printf("Failed\n");
    else
      printf("Succeeded\n");
  } else // if !cpuExec
    DUMP_SUCCESS(17);

  return 0;
}
|
sxc_fmt_plug.c | /* SXC cracker patch for JtR. Hacked together during Summer of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sxc);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "sha.h"
#include <openssl/blowfish.h>
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2 // tuned on core i7
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "sxc"
#define FORMAT_NAME "StarOffice .sxc"
#define FORMAT_TAG "$sxc$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " Blowfish"
#else
#define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdc
cb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b
553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[32 / sizeof(uint32_t)];
/* Decoded fields of one "$sxc$*" ciphertext; one instance is shared by
 * all candidates via set_salt()/cur_salt. */
static struct custom_salt {
	int cipher_type; // FIXME: cipher_type seems to be ignored
	int checksum_type;
	int iterations;        // PBKDF2 iteration count
	int key_size;          // derived-key length in bytes (16 or 32, see valid())
	int iv_length;         // bytes used in iv[]
	int salt_length;       // bytes used in salt[]
	int original_length;   // plaintext byte count checksummed in crypt_all()
	int length;            // encrypted content byte count
	unsigned char iv[16];
	unsigned char salt[32];
	unsigned char content[1024];
} *cur_salt;
/* Format init hook: size the per-candidate buffers.  Under OpenMP the
 * key counts are scaled first by the thread count, then additionally by
 * OMP_SCALE (max only), so each thread has several crypts in flight. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* zero-initialized so unset keys are empty strings */
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Format teardown hook: release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate one "$sxc$*..."-tagged ciphertext line.
 * Returns 1 when every '*'-separated field is present and within range,
 * 0 otherwise.  Operates on a strdup'ed copy because strtokm() is
 * destructive; the copy is freed on every exit path. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;   /* base pointer kept for MEM_FREE */
	char *p;
	int res, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
		goto err;
	res = atoi(p);
	if (res <= 0)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
		goto err;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 16)       /* must fit custom_salt.iv[16] */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iv */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 32)       /* must fit custom_salt.salt[32] */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* salt */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* original length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)     /* must fit custom_salt.content[1024] */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* content */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (strtokm(NULL, "*") != NULL) /* the end */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Decode an already-validated ciphertext into a static custom_salt.
 * No bounds checks here: valid() has vetted every field and range.
 * Returns a pointer to static storage (overwritten on each call). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN; /* skip over "$sxc$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	strtokm(NULL, "*");
	/* skip checksum field */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the IV */
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the salt */
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.original_length = atoi(p);
	p = strtokm(NULL, "*");
	cs.length = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the encrypted content blob */
	for (i = 0; i < cs.length; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Extract the stored SHA-1 checksum (field five of the ciphertext) as
 * raw bytes.  Returns a pointer to static, word-aligned storage. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *dst = buf.c;
	char *copy = strdup(ciphertext);
	char *base = copy;
	char *tok;
	int n;

	copy += 6; /* skip over "$sxc$*" */
	/* discard the first four '*'-separated fields */
	tok = strtokm(copy, "*");
	for (n = 0; n < 3; n++)
		tok = strtokm(NULL, "*");
	/* fifth field: hex-encoded binary checksum */
	tok = strtokm(NULL, "*");
	for (n = 0; n < BINARY_SIZE; n++, tok += 2)
		dst[n] = (atoi16[ARCH_INDEX(tok[0])] << 4) |
		          atoi16[ARCH_INDEX(tok[1])];
	MEM_FREE(base);
	return dst;
}
/* Make the given salt current for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Hash every candidate key in [0, count).
 * Per key: SHA-1(password) seeds PBKDF2-HMAC-SHA1 (salt/iterations from
 * cur_salt); the derived key decrypts the stored content with Blowfish
 * CFB64; SHA-1 of the recovered plaintext lands in crypt_out[] for the
 * cmp_* callbacks.  Under OpenMP the index loop is parallel; without
 * it the block runs once with index == 0 (count <= MAX_KEYS_PER_CRYPT). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char key[MAX_KEYS_PER_CRYPT][32];
		unsigned char hash[MAX_KEYS_PER_CRYPT][32];
		BF_KEY bf_key;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];
		int i;
		SHA_CTX ctx;
		/* step 1: SHA-1 of each plaintext password */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
			SHA1_Final((unsigned char *)hash[i], &ctx);
		}
		/* step 2: PBKDF2-HMAC-SHA1 key derivation (SIMD when built in) */
#ifdef SIMD_COEF_32
		{
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = 20;   /* SHA-1 digest length */
				pin[i] = (unsigned char*)hash[i];
				pout[i] = key[i];
			}
			pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
				cur_salt->salt_length,
				cur_salt->iterations, pout,
				cur_salt->key_size, 0);
		}
#else
		pbkdf2_sha1(hash[0], 20, cur_salt->salt,
			cur_salt->salt_length,
			cur_salt->iterations, key[0],
			cur_salt->key_size, 0);
#endif
		/* step 3: decrypt the content and checksum the plaintext */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			bf_ivec_pos = 0;
			memcpy(ivec, cur_salt->iv, 8);
			BF_set_key(&bf_key, cur_salt->key_size, key[i]);
			BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, output, cur_salt->original_length);
			SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
		}
	}
	return count;
}
/* Quick scan: does any computed hash match the reference binary in its
 * first machine word?  Full comparison is deferred to cmp_one(). */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
			return 1;
	return 0;
}
/* Full-width comparison of one computed hash against the reference. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* No further verification needed beyond cmp_one(); always a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void sxc_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate at slot `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Tunable-cost hook: report the PBKDF2 iteration count of a salt. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/* Format descriptor registered with the John the Ripper core:
 * static parameters first, then the method table wired to the
 * functions above (defaults where this format needs no override). */
struct fmt_main fmt_sxc = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"iteration count",   /* reported tunable cost */
		},
		{ FORMAT_TAG },
		sxc_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sxc_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * Note: *y is used as scratch and may be modified.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that x->tv_usec - y->tv_usec lands in a sane range. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) 3D wave stencil: allocates the two
 * time planes and the coefficient grid, runs the kernel TESTS times,
 * and reports the minimum wall-clock time.
 *
 * Usage: prog Nx Ny Nz [Nt]  (interior sizes; 8 ghost cells are added)
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Defaults guard against reading uninitialized sizes when arguments
     * are missing (the original left Nx..Nt indeterminate — UB). */
    int Nx = 8, Ny = 8, Nz = 8, Nt = 0;

    if (argc > 3) {
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* A holds the two time planes; roc2 the per-point coefficient.
     * (The original assigned a stray malloc to roc2 and immediately
     * overwrote it, leaking the first allocation.) */
    double ****A = (double ****) malloc(sizeof(double ***) * 2);
    double ***roc2 = (double ***) malloc(sizeof(double **) * Nz);
    A[0] = (double ***) malloc(sizeof(double **) * Nz);
    A[1] = (double ***) malloc(sizeof(double **) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double **) malloc(sizeof(double *) * Ny);
        A[1][i] = (double **) malloc(sizeof(double *) * Ny);
        roc2[i] = (double **) malloc(sizeof(double *) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
            A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
            roc2[i][j] = (double *) malloc(sizeof(double) * Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int *) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 24;
    tile_size[3] = 32;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;

    /* Initialize every cell, starting from index 0 (the original began
     * at 1 and never touched A[1], so the stencil read uninitialized
     * ghost cells and the first write plane — UB). */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (also the top-level pointers and tile list,
    // which the original leaked)
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    free(roc2);
    free(tile_size);

    return 0;
}
|
center_sse.h | #include "util_sse.h"
void inplace_center_and_trace_atom_major(
    float* coords,
    float* traces,
    const int n_frames,
    const int n_atoms
)
{
    /* Center a trajectory containing multiple conformations inplace.
       The coordinates are store in float, but the accumulation is done in
       double.
       Also compute the traces of the centered conformations, which are necessary
       for RMSD.

       coords: n_frames * n_atoms * 3 floats, atom-major xyz interleaved;
               centered in place.
       traces: n_frames floats (may be NULL); receives sum of squared
               centered coordinates per frame.
    */
    long long i, k;
    float* confp;
    __m128d sx_, sy_, sz_, trace_;
    __m128 mux_, muy_, muz_;
    float sxf, syf, szf;
    double sx[2], sy[2], sz[2], trace[2];
    __m128 x, y, z, x2, y2, z2;
#ifdef _OPENMP
    /* Frames are independent; everything per-frame is privatized.
       The loop index k is implicitly private. */
#pragma omp parallel for shared(coords, traces) \
    private(sx_, sy_, sz_, trace_, mux_, muy_, muz_, sxf, syf, szf, \
    confp, i, x, y, z, x2, y2, z2, sx, sy, sz, trace)
#endif
    for (k = 0; k < n_frames; k++) {
        confp = &coords[k * n_atoms * 3];
        sx_ = sy_ = sz_ = trace_ = _mm_setzero_pd();
        /* Pass 1: sum x, y, z over atoms, 4 atoms (12 floats) per iteration. */
        for (i = 0; i < n_atoms/4; i++) {
            aos_deinterleaved_loadu(confp, &x, &y, &z);
            /* accumulate the sums of each coordinate in double */
            /* get the first two values from each float4 */
            sx_ = _mm_add_pd(sx_, _mm_cvtps_pd(x));
            sy_ = _mm_add_pd(sy_, _mm_cvtps_pd(y));
            sz_ = _mm_add_pd(sz_, _mm_cvtps_pd(z));
            /* and shuffle in the second two values */
            sx_ = _mm_add_pd(sx_, _mm_cvtps_pd(_mm_movehl_ps(x, x)));
            sy_ = _mm_add_pd(sy_, _mm_cvtps_pd(_mm_movehl_ps(y, y)));
            sz_ = _mm_add_pd(sz_, _mm_cvtps_pd(_mm_movehl_ps(z, z)));
            confp += 12;
        }
        /* copy the summed coordinates out of the SSE registers */
        _mm_storeu_pd(sx, sx_);
        _mm_storeu_pd(sy, sy_);
        _mm_storeu_pd(sz, sz_);
        /* Add the last couple entries that weren't a factor of four.
           confp already points at the first leftover atom here. */
        for (i = 0; i < n_atoms % 4; i++) {
            sx[0] += confp[i*3 + 0];
            sy[0] += confp[i*3 + 1];
            sz[0] += confp[i*3 + 2];
        }
        /* Put everything into the first value. We're doing this here, as */
        /* opposed to using a SSE horizontal add. */
        sx[0] += sx[1];
        sy[0] += sy[1];
        sz[0] += sz[1];
        /* Now we want mean x, y, and z positions */
        sx[0] /= n_atoms;
        sy[0] /= n_atoms;
        sz[0] /= n_atoms;
        /* Load these mean positions back into the SSE registers
           (broadcast to all four lanes). */
        sxf = (float) sx[0];
        syf = (float) sy[0];
        szf = (float) sz[0];
        mux_ = _mm_load1_ps(&sxf);
        muy_ = _mm_load1_ps(&syf);
        muz_ = _mm_load1_ps(&szf);
        /* Pass 2: subtract the means in place and accumulate the trace
           (sum of squared centered coordinates) in double. */
        confp = &coords[k * n_atoms * 3];
        for (i = 0; i < n_atoms/4; i++) {
            aos_deinterleaved_loadu(confp, &x, &y, &z);
            x = _mm_sub_ps(x, mux_);
            y = _mm_sub_ps(y, muy_);
            z = _mm_sub_ps(z, muz_);
            x2 = _mm_mul_ps(x, x);
            y2 = _mm_mul_ps(y, y);
            z2 = _mm_mul_ps(z, z);
            trace_ = _mm_add_pd(trace_, _mm_cvtps_pd(x2));
            trace_ = _mm_add_pd(trace_, _mm_cvtps_pd(y2));
            trace_ = _mm_add_pd(trace_, _mm_cvtps_pd(z2));
            trace_ = _mm_add_pd(trace_, _mm_cvtps_pd(_mm_movehl_ps(x2, x2)));
            trace_ = _mm_add_pd(trace_, _mm_cvtps_pd(_mm_movehl_ps(y2, y2)));
            trace_ = _mm_add_pd(trace_, _mm_cvtps_pd(_mm_movehl_ps(z2, z2)));
            aos_interleaved_storeu(confp, x, y, z);
            confp += 12;
        }
        _mm_storeu_pd(trace, trace_);
        /* Scalar tail: center the leftover atoms and fold their squares in. */
        for (i = 0; i < n_atoms % 4; i++) {
            confp[i*3 + 0] -= sxf;
            confp[i*3 + 1] -= syf;
            confp[i*3 + 2] -= szf;
            trace[0] += confp[i*3 + 0]*confp[i*3 + 0];
            trace[0] += confp[i*3 + 1]*confp[i*3 + 1];
            trace[0] += confp[i*3 + 2]*confp[i*3 + 2];
        }
        trace[0] += trace[1];
        if (traces != NULL)
            traces[k] = (float) trace[0];
    }
}
|
gather.c | #include "../../shared.h"
#include "hale.h"
#include <float.h>
#include <math.h>
#include <stdio.h>
// Gathers all of the subcell quantities on the mesh
void gather_subcell_mass_and_energy(
const int ncells, const int nnodes, double* cell_centroids_x,
double* cell_centroids_y, double* cell_centroids_z,
int* cells_to_nodes_offsets, const double* nodes_x, const double* nodes_y,
const double* nodes_z, const double* cell_volume, double* energy,
double* density, double* velocity_x, double* velocity_y, double* velocity_z,
double* ke_mass, double* cell_mass, double* subcell_mass,
double* subcell_volume, double* subcell_ie_mass, double* subcell_ke_mass,
double* subcell_centroids_x, double* subcell_centroids_y,
double* subcell_centroids_z, int* faces_to_cells0, int* faces_to_cells1,
int* cells_to_faces_offsets, int* cells_to_faces, int* cells_to_nodes,
int* nodes_to_cells_offsets, int* nodes_to_cells, double* initial_mass,
double* initial_ie_mass, double* initial_ke_mass);
// Gathers the momentum into the subcells
void gather_subcell_momentum(
const int nnodes, const double* nodal_volumes, const double* nodal_mass,
int* nodes_to_cells, const double* nodes_x, const double* nodes_y,
const double* nodes_z, double* velocity_x, double* velocity_y,
double* velocity_z, double* subcell_volume, double* cell_centroids_x,
double* cell_centroids_y, double* cell_centroids_z,
double* subcell_momentum_x, double* subcell_momentum_y,
double* subcell_momentum_z, double* subcell_centroids_x,
double* subcell_centroids_y, double* subcell_centroids_z,
int* nodes_to_cells_offsets, int* cells_to_nodes_offsets,
int* cells_to_nodes, int* nodes_to_nodes_offsets, int* nodes_to_nodes,
vec_t* initial_momentum);
// gathers all of the subcell quantities on the mesh
// gathers all of the subcell quantities on the mesh
//
// Orchestrates the gathering stage of the remap: (1) geometry (volumes and
// centroids), (2) mass and energy per subcell, (3) momentum per subcell.
// The argument order at each call site matters and must match the callee
// prototypes exactly; the initial_* outputs are filled by the callees for
// later conservation checks.
void gather_subcell_quantities(UnstructuredMesh* umesh, HaleData* hale_data,
                               vec_t* initial_momentum, double* initial_mass,
                               double* initial_ie_mass,
                               double* initial_ke_mass) {

  /*
   * GATHERING STAGE OF THE REMAP
   */

  // Calculates the cell volume, subcell volume and the subcell centroids
  calc_volumes_centroids(
      umesh->ncells, umesh->nnodes, hale_data->nnodes_by_subcell,
      umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
      hale_data->subcells_to_faces_offsets, hale_data->subcells_to_faces,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->faces_cclockwise_cell, umesh->nodes_x0, umesh->nodes_y0,
      umesh->nodes_z0, hale_data->subcell_centroids_x,
      hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
      hale_data->subcell_volume, hale_data->cell_volume,
      hale_data->nodal_volumes, umesh->nodes_to_cells_offsets,
      umesh->nodes_to_cells);

  // Gathers all of the subcell quantities on the mesh (depends on the
  // volumes/centroids computed above)
  gather_subcell_mass_and_energy(
      umesh->ncells, umesh->nnodes, umesh->cell_centroids_x,
      umesh->cell_centroids_y, umesh->cell_centroids_z,
      umesh->cells_to_nodes_offsets, umesh->nodes_x0, umesh->nodes_y0,
      umesh->nodes_z0, hale_data->cell_volume, hale_data->energy0,
      hale_data->density0, hale_data->velocity_x0, hale_data->velocity_y0,
      hale_data->velocity_z0, hale_data->ke_mass, hale_data->cell_mass,
      hale_data->subcell_mass, hale_data->subcell_volume,
      hale_data->subcell_ie_mass, hale_data->subcell_ke_mass,
      hale_data->subcell_centroids_x, hale_data->subcell_centroids_y,
      hale_data->subcell_centroids_z, umesh->faces_to_cells0,
      umesh->faces_to_cells1, umesh->cells_to_faces_offsets,
      umesh->cells_to_faces, umesh->cells_to_nodes,
      umesh->nodes_to_cells_offsets, umesh->nodes_to_cells, initial_mass,
      initial_ie_mass, initial_ke_mass);

  // Gathers the momentum the subcells
  gather_subcell_momentum(
      umesh->nnodes, hale_data->nodal_volumes, hale_data->nodal_mass,
      umesh->nodes_to_cells, umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0,
      hale_data->velocity_x0, hale_data->velocity_y0, hale_data->velocity_z0,
      hale_data->subcell_volume, umesh->cell_centroids_x,
      umesh->cell_centroids_y, umesh->cell_centroids_z,
      hale_data->subcell_momentum_x, hale_data->subcell_momentum_y,
      hale_data->subcell_momentum_z, hale_data->subcell_centroids_x,
      hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
      umesh->nodes_to_cells_offsets, umesh->cells_to_nodes_offsets,
      umesh->cells_to_nodes, umesh->nodes_to_nodes_offsets,
      umesh->nodes_to_nodes, initial_momentum);
}
// Gathers all of the subcell quantities on the mesh.
//
// First computes a cell-centred kinetic-energy mass from the subcell masses
// and nodal velocities, then distributes internal- and kinetic-energy
// densities to the subcells with a limited least-squares linear
// reconstruction. Outputs: subcell_ie_mass, subcell_ke_mass, and the
// initial_* totals used for conservation checks.
void gather_subcell_mass_and_energy(
    const int ncells, const int nnodes, double* cell_centroids_x,
    double* cell_centroids_y, double* cell_centroids_z,
    int* cells_to_nodes_offsets, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, const double* cell_volume, double* energy,
    double* density, double* velocity_x, double* velocity_y, double* velocity_z,
    double* ke_mass, double* cell_mass, double* subcell_mass,
    double* subcell_volume, double* subcell_ie_mass, double* subcell_ke_mass,
    double* subcell_centroids_x, double* subcell_centroids_y,
    double* subcell_centroids_z, int* faces_to_cells0, int* faces_to_cells1,
    int* cells_to_faces_offsets, int* cells_to_faces, int* cells_to_nodes,
    int* nodes_to_cells_offsets, int* nodes_to_cells, double* initial_mass,
    double* initial_ie_mass, double* initial_ke_mass) {

  double total_mass = 0.0;
  double total_ie_mass = 0.0;
  double total_ke_mass = 0.0;

  // We first have to determine the cell centered kinetic energy:
  // ke_mass[cc] = sum over the cell's subcells of 0.5 * m_sub * |v_node|^2
#pragma omp parallel for reduction(+ : total_ke_mass)
  for (int cc = 0; cc < ncells; ++cc) {
    const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
    const int nnodes_by_cell =
        cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;

    ke_mass[(cc)] = 0.0;

    // Subcells are ordered with the nodes on a face
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
      const int subcell_index = cell_to_nodes_off + nn;
      ke_mass[(cc)] += subcell_mass[(subcell_index)] * 0.5 *
                       (velocity_x[(node_index)] * velocity_x[(node_index)] +
                        velocity_y[(node_index)] * velocity_y[(node_index)] +
                        velocity_z[(node_index)] * velocity_z[(node_index)]);
    }

    total_ke_mass += ke_mass[(cc)];
  }

  double total_ie_in_subcells = 0.0;
  double total_ke_in_subcells = 0.0;

  // Calculate the sub-cell internal and kinetic energies
#pragma omp parallel for reduction(+ : total_mass, total_ie_mass, \
    total_ie_in_subcells, total_ke_in_subcells)
  for (int cc = 0; cc < ncells; ++cc) {
    // Calculating the volume dist necessary for the least squares
    // regression
    const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
    const int nfaces_by_cell =
        cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
    const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
    const int nnodes_by_cell =
        cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;

    // Internal- and kinetic-energy densities for this cell
    const double cell_ie = density[(cc)] * energy[(cc)];
    const double cell_ke = ke_mass[(cc)] / cell_volume[(cc)];

    vec_t cell_c = {0.0, 0.0, 0.0};
    calc_centroid(nnodes_by_cell, nodes_x, nodes_y, nodes_z, cells_to_nodes,
                  cell_to_nodes_off, &cell_c);

    vec_t ie_rhs = {0.0, 0.0, 0.0};
    vec_t ke_rhs = {0.0, 0.0, 0.0};
    vec_t coeff[3] = {{0.0, 0.0, 0.0}};

    total_mass += cell_mass[(cc)];
    total_ie_mass += cell_mass[(cc)] * energy[(cc)];

    // Determine the weighted volume dist for neighbouring cells, and track
    // the min/max neighbouring densities for the limiter
    double gmax_ie = -DBL_MAX;
    double gmin_ie = DBL_MAX;
    double gmax_ke = -DBL_MAX;
    double gmin_ke = DBL_MAX;
    for (int ff = 0; ff < nfaces_by_cell; ++ff) {
      const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
      const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
                                      ? faces_to_cells1[(face_index)]
                                      : faces_to_cells0[(face_index)];

      // Check if boundary face
      if (neighbour_index == -1) {
        continue;
      }

      vec_t dist = {cell_centroids_x[(neighbour_index)] - cell_c.x,
                    cell_centroids_y[(neighbour_index)] - cell_c.y,
                    cell_centroids_z[(neighbour_index)] - cell_c.z};

      // Store the neighbouring cell's contribution to the coefficients,
      // weighted by the inverse square of the neighbour volume
      const double neighbour_vol = cell_volume[(neighbour_index)];
      const double vol2 = neighbour_vol * neighbour_vol;
      coeff[0].x += 2.0 * (dist.x * dist.x) / vol2;
      coeff[0].y += 2.0 * (dist.x * dist.y) / vol2;
      coeff[0].z += 2.0 * (dist.x * dist.z) / vol2;
      coeff[1].x += 2.0 * (dist.y * dist.x) / vol2;
      coeff[1].y += 2.0 * (dist.y * dist.y) / vol2;
      coeff[1].z += 2.0 * (dist.y * dist.z) / vol2;
      coeff[2].x += 2.0 * (dist.z * dist.x) / vol2;
      coeff[2].y += 2.0 * (dist.z * dist.y) / vol2;
      coeff[2].z += 2.0 * (dist.z * dist.z) / vol2;

      const double neighbour_ie =
          density[(neighbour_index)] * energy[(neighbour_index)];
      const double neighbour_ke = ke_mass[(neighbour_index)] / neighbour_vol;
      gmax_ie = max(gmax_ie, neighbour_ie);
      gmin_ie = min(gmin_ie, neighbour_ie);
      gmax_ke = max(gmax_ke, neighbour_ke);
      gmin_ke = min(gmin_ke, neighbour_ke);

      // Prepare the RHS, which includes energy differential
      const double die = (neighbour_ie - cell_ie);
      const double dke = (neighbour_ke - cell_ke);
      ie_rhs.x += 2.0 * (dist.x * die) / neighbour_vol;
      ie_rhs.y += 2.0 * (dist.y * die) / neighbour_vol;
      ie_rhs.z += 2.0 * (dist.z * die) / neighbour_vol;
      ke_rhs.x += 2.0 * (dist.x * dke) / neighbour_vol;
      ke_rhs.y += 2.0 * (dist.y * dke) / neighbour_vol;
      ke_rhs.z += 2.0 * (dist.z * dke) / neighbour_vol;
    }

    // Determine the inverse of the coefficient matrix
    vec_t inv[3];
    calc_3x3_inverse(&coeff, &inv);

    // Solve for the internal and kinetic energy gradients
    vec_t grad_ie = {
        inv[0].x * ie_rhs.x + inv[0].y * ie_rhs.y + inv[0].z * ie_rhs.z,
        inv[1].x * ie_rhs.x + inv[1].y * ie_rhs.y + inv[1].z * ie_rhs.z,
        inv[2].x * ie_rhs.x + inv[2].y * ie_rhs.y + inv[2].z * ie_rhs.z};
    vec_t grad_ke = {
        inv[0].x * ke_rhs.x + inv[0].y * ke_rhs.y + inv[0].z * ke_rhs.z,
        inv[1].x * ke_rhs.x + inv[1].y * ke_rhs.y + inv[1].z * ke_rhs.z,
        inv[2].x * ke_rhs.x + inv[2].y * ke_rhs.y + inv[2].z * ke_rhs.z};

    // Calculate the limiter for the internal energy gradient
    double limiter = 1.0;
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
      limiter = min(limiter, calc_cell_limiter(cell_ie, gmax_ie, gmin_ie,
                                               &grad_ie, nodes_x[(node_index)],
                                               nodes_y[(node_index)],
                                               nodes_z[(node_index)], &cell_c));
    }

    // This stops extrema from worsening as part of the gather. Is it
    // conservative?
    grad_ie.x *= limiter;
    grad_ie.y *= limiter;
    grad_ie.z *= limiter;

    // Calculate the limiter for the kinetic energy gradient
    limiter = 1.0;
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
      limiter = min(limiter, calc_cell_limiter(cell_ke, gmax_ke, gmin_ke,
                                               &grad_ke, nodes_x[(node_index)],
                                               nodes_y[(node_index)],
                                               nodes_z[(node_index)], &cell_c));
    }

    // Apply the kinetic energy limiter to the kinetic energy gradient.
    // BUGFIX: previously this re-scaled grad_ie a second time and left
    // grad_ke completely unlimited.
    grad_ke.x *= limiter;
    grad_ke.y *= limiter;
    grad_ke.z *= limiter;

    // Subcells are ordered with the nodes on a face
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int subcell_index = cell_to_nodes_off + nn;

      // Calculate the center of mass distance
      const double dx = subcell_centroids_x[(subcell_index)] - cell_c.x;
      const double dy = subcell_centroids_y[(subcell_index)] - cell_c.y;
      const double dz = subcell_centroids_z[(subcell_index)] - cell_c.z;

      // Subcell internal and kinetic energy from linear function at cell
      subcell_ie_mass[(subcell_index)] =
          subcell_volume[(subcell_index)] *
          (cell_ie + grad_ie.x * dx + grad_ie.y * dy + grad_ie.z * dz);
      subcell_ke_mass[(subcell_index)] =
          subcell_volume[(subcell_index)] *
          (cell_ke + grad_ke.x * dx + grad_ke.y * dy + grad_ke.z * dz);

      total_ie_in_subcells += subcell_ie_mass[(subcell_index)];
      total_ke_in_subcells += subcell_ke_mass[(subcell_index)];

      if (subcell_ie_mass[(subcell_index)] < -EPS ||
          subcell_ke_mass[(subcell_index)] < -EPS) {
        printf("Negative energy mass %d %.12f %.12f\n", subcell_index,
               subcell_ie_mass[(subcell_index)],
               subcell_ke_mass[(subcell_index)]);
      }
    }
  }

  // Report the gathered totals for conservation checking
  *initial_mass = total_mass;
  *initial_ie_mass = total_ie_in_subcells;
  *initial_ke_mass = total_ke_in_subcells;

  printf("Total Energy in Cells %.12f\n", total_ie_mass + total_ke_mass);
  printf("Total Energy in Subcells %.12f\n",
         total_ie_in_subcells + total_ke_in_subcells);
  printf("Difference %.12f\n\n",
         (total_ie_mass + total_ke_mass) -
             (total_ie_in_subcells + total_ke_in_subcells));
}
// Gathers the momentum into the subcells.
//
// For every node, reconstructs the nodal momentum density with a limited
// least-squares linear fit over the node's neighbours, then evaluates that
// linear function at the centroid of each subcell touching the node to
// produce subcell momenta. Also accumulates nodal and subcell momentum
// totals for a conservation report.
void gather_subcell_momentum(
    const int nnodes, const double* nodal_volumes, const double* nodal_mass,
    int* nodes_to_cells, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, double* velocity_x, double* velocity_y,
    double* velocity_z, double* subcell_volume, double* cell_centroids_x,
    double* cell_centroids_y, double* cell_centroids_z,
    double* subcell_momentum_x, double* subcell_momentum_y,
    double* subcell_momentum_z, double* subcell_centroids_x,
    double* subcell_centroids_y, double* subcell_centroids_z,
    int* nodes_to_cells_offsets, int* cells_to_nodes_offsets,
    int* cells_to_nodes, int* nodes_to_nodes_offsets, int* nodes_to_nodes,
    vec_t* initial_momentum) {

  double initial_momentum_x = 0.0;
  double initial_momentum_y = 0.0;
  double initial_momentum_z = 0.0;
  double total_subcell_vx = 0.0;
  double total_subcell_vy = 0.0;
  double total_subcell_vz = 0.0;
#pragma omp parallel for reduction(+ : initial_momentum_x, initial_momentum_y, \
    initial_momentum_z, total_subcell_vx, \
    total_subcell_vy, total_subcell_vz)
  for (int nn = 0; nn < nnodes; ++nn) {
    // Calculate the gradient for the nodal momentum
    vec_t rhsx = {0.0, 0.0, 0.0};
    vec_t rhsy = {0.0, 0.0, 0.0};
    vec_t rhsz = {0.0, 0.0, 0.0};
    vec_t coeff[3] = {{0.0, 0.0, 0.0}};
    // Min/max neighbour momentum densities for the limiter
    vec_t gmin = {DBL_MAX, DBL_MAX, DBL_MAX};
    vec_t gmax = {-DBL_MAX, -DBL_MAX, -DBL_MAX};
    vec_t node = {nodes_x[(nn)], nodes_y[(nn)], nodes_z[(nn)]};

    // Momentum density at this node: (m/V) * v
    const double nodal_density = nodal_mass[(nn)] / nodal_volumes[(nn)];
    vec_t node_mom_density = {nodal_density * velocity_x[(nn)],
                              nodal_density * velocity_y[(nn)],
                              nodal_density * velocity_z[(nn)]};

    initial_momentum_x += nodal_mass[(nn)] * velocity_x[(nn)];
    initial_momentum_y += nodal_mass[(nn)] * velocity_y[(nn)];
    initial_momentum_z += nodal_mass[(nn)] * velocity_z[(nn)];

    const int node_to_nodes_off = nodes_to_nodes_offsets[(nn)];
    const int nnodes_by_node =
        nodes_to_nodes_offsets[(nn + 1)] - node_to_nodes_off;

    // Assemble the least-squares normal equations from neighbouring nodes
    for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
      const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];

      // Skip missing neighbours (boundary entries are marked -1)
      if (neighbour_index == -1) {
        continue;
      }

      // Calculate the center of mass distance
      vec_t i = {nodes_x[(neighbour_index)] - node.x,
                 nodes_y[(neighbour_index)] - node.y,
                 nodes_z[(neighbour_index)] - node.z};

      // Store the neighbouring cell's contribution to the coefficients,
      // weighted by the inverse square of the neighbour's nodal volume
      double neighbour_vol = nodal_volumes[(neighbour_index)];
      coeff[0].x += 2.0 * (i.x * i.x) / (neighbour_vol * neighbour_vol);
      coeff[0].y += 2.0 * (i.x * i.y) / (neighbour_vol * neighbour_vol);
      coeff[0].z += 2.0 * (i.x * i.z) / (neighbour_vol * neighbour_vol);
      coeff[1].x += 2.0 * (i.y * i.x) / (neighbour_vol * neighbour_vol);
      coeff[1].y += 2.0 * (i.y * i.y) / (neighbour_vol * neighbour_vol);
      coeff[1].z += 2.0 * (i.y * i.z) / (neighbour_vol * neighbour_vol);
      coeff[2].x += 2.0 * (i.z * i.x) / (neighbour_vol * neighbour_vol);
      coeff[2].y += 2.0 * (i.z * i.y) / (neighbour_vol * neighbour_vol);
      coeff[2].z += 2.0 * (i.z * i.z) / (neighbour_vol * neighbour_vol);

      const double neighbour_nodal_density =
          nodal_mass[(neighbour_index)] / nodal_volumes[(neighbour_index)];

      vec_t neighbour_mom_density = {
          neighbour_nodal_density * velocity_x[(neighbour_index)],
          neighbour_nodal_density * velocity_y[(neighbour_index)],
          neighbour_nodal_density * velocity_z[(neighbour_index)]};

      gmax.x = max(gmax.x, neighbour_mom_density.x);
      gmin.x = min(gmin.x, neighbour_mom_density.x);
      gmax.y = max(gmax.y, neighbour_mom_density.y);
      gmin.y = min(gmin.y, neighbour_mom_density.y);
      gmax.z = max(gmax.z, neighbour_mom_density.z);
      gmin.z = min(gmin.z, neighbour_mom_density.z);

      // RHS terms from the momentum-density differentials
      vec_t dv = {(neighbour_mom_density.x - node_mom_density.x),
                  (neighbour_mom_density.y - node_mom_density.y),
                  (neighbour_mom_density.z - node_mom_density.z)};

      rhsx.x += 2.0 * i.x * dv.x / neighbour_vol;
      rhsx.y += 2.0 * i.y * dv.x / neighbour_vol;
      rhsx.z += 2.0 * i.z * dv.x / neighbour_vol;
      rhsy.x += 2.0 * i.x * dv.y / neighbour_vol;
      rhsy.y += 2.0 * i.y * dv.y / neighbour_vol;
      rhsy.z += 2.0 * i.z * dv.y / neighbour_vol;
      rhsz.x += 2.0 * i.x * dv.z / neighbour_vol;
      rhsz.y += 2.0 * i.y * dv.z / neighbour_vol;
      rhsz.z += 2.0 * i.z * dv.z / neighbour_vol;
    }

    // Determine the inverse of the coefficient matrix
    vec_t inv[3];
    calc_3x3_inverse(&coeff, &inv);

    // Solve for the velocity density gradients
    vec_t grad_vx = {inv[0].x * rhsx.x + inv[0].y * rhsx.y + inv[0].z * rhsx.z,
                     inv[1].x * rhsx.x + inv[1].y * rhsx.y + inv[1].z * rhsx.z,
                     inv[2].x * rhsx.x + inv[2].y * rhsx.y + inv[2].z * rhsx.z};
    vec_t grad_vy = {inv[0].x * rhsy.x + inv[0].y * rhsy.y + inv[0].z * rhsy.z,
                     inv[1].x * rhsy.x + inv[1].y * rhsy.y + inv[1].z * rhsy.z,
                     inv[2].x * rhsy.x + inv[2].y * rhsy.y + inv[2].z * rhsy.z};
    vec_t grad_vz = {inv[0].x * rhsz.x + inv[0].y * rhsz.y + inv[0].z * rhsz.z,
                     inv[1].x * rhsz.x + inv[1].y * rhsz.y + inv[1].z * rhsz.z,
                     inv[2].x * rhsz.x + inv[2].y * rhsz.y + inv[2].z * rhsz.z};

    // Limit the gradients, checking against each attached cell centroid
    double vx_limiter = 1.0;
    double vy_limiter = 1.0;
    double vz_limiter = 1.0;
    const int node_to_cells_off = nodes_to_cells_offsets[(nn)];
    const int ncells_by_node =
        nodes_to_cells_offsets[(nn + 1)] - node_to_cells_off;
    for (int cc = 0; cc < ncells_by_node; ++cc) {
      const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
      vec_t cell_c = {cell_centroids_x[(cell_index)],
                      cell_centroids_y[(cell_index)],
                      cell_centroids_z[(cell_index)]};
      vx_limiter =
          min(vx_limiter,
              calc_cell_limiter(node_mom_density.x, gmax.x, gmin.x, &grad_vx,
                                cell_c.x, cell_c.y, cell_c.z, &node));
      vy_limiter =
          min(vy_limiter,
              calc_cell_limiter(node_mom_density.y, gmax.y, gmin.y, &grad_vy,
                                cell_c.x, cell_c.y, cell_c.z, &node));
      vz_limiter =
          min(vz_limiter,
              calc_cell_limiter(node_mom_density.z, gmax.z, gmin.z, &grad_vz,
                                cell_c.x, cell_c.y, cell_c.z, &node));
    }

    // This stops extrema from worsening as part of the gather. Is it
    // conservative?
    grad_vx.x *= vx_limiter;
    grad_vx.y *= vx_limiter;
    grad_vx.z *= vx_limiter;
    grad_vy.x *= vy_limiter;
    grad_vy.y *= vy_limiter;
    grad_vy.z *= vy_limiter;
    grad_vz.x *= vz_limiter;
    grad_vz.y *= vz_limiter;
    grad_vz.z *= vz_limiter;

    // Evaluate the limited linear reconstruction at each attached subcell
    for (int cc = 0; cc < ncells_by_node; ++cc) {
      const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
      const int cell_to_nodes_off = cells_to_nodes_offsets[(cell_index)];
      const int nnodes_by_cell =
          cells_to_nodes_offsets[(cell_index + 1)] - cell_to_nodes_off;

      // Determine the position of the node in the cell (subcells share the
      // cell's node ordering)
      int nn2;
      for (nn2 = 0; nn2 < nnodes_by_cell; ++nn2) {
        if (cells_to_nodes[(cell_to_nodes_off + nn2)] == nn) {
          break;
        }
      }

      const int subcell_index = cell_to_nodes_off + nn2;
      const double vol = subcell_volume[(subcell_index)];
      const double dx = subcell_centroids_x[(subcell_index)] - nodes_x[(nn)];
      const double dy = subcell_centroids_y[(subcell_index)] - nodes_y[(nn)];
      const double dz = subcell_centroids_z[(subcell_index)] - nodes_z[(nn)];

      subcell_momentum_x[(subcell_index)] =
          vol * (node_mom_density.x + grad_vx.x * dx + grad_vx.y * dy +
                 grad_vx.z * dz);
      subcell_momentum_y[(subcell_index)] =
          vol * (node_mom_density.y + grad_vy.x * dx + grad_vy.y * dy +
                 grad_vy.z * dz);
      subcell_momentum_z[(subcell_index)] =
          vol * (node_mom_density.z + grad_vz.x * dx + grad_vz.y * dy +
                 grad_vz.z * dz);

      total_subcell_vx += subcell_momentum_x[(subcell_index)];
      total_subcell_vy += subcell_momentum_y[(subcell_index)];
      total_subcell_vz += subcell_momentum_z[(subcell_index)];
    }
  }

  // The conserved total is taken from the subcells (the gathered state)
  initial_momentum->x = total_subcell_vx;
  initial_momentum->y = total_subcell_vy;
  initial_momentum->z = total_subcell_vz;

  printf("Total Momentum in Cells (%.12f,%.12f,%.12f)\n", initial_momentum_x,
         initial_momentum_y, initial_momentum_z);
  printf("Total Momentum in Subcells (%.12f,%.12f,%.12f)\n", total_subcell_vx,
         total_subcell_vy, total_subcell_vz);
  printf("Difference (%.12f,%.12f,%.12f)\n\n",
         initial_momentum_x - total_subcell_vx,
         initial_momentum_y - total_subcell_vy,
         initial_momentum_z - total_subcell_vz);
}
|
residual_based_pseudo_static_displacement_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME )
#define KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/residual_based_bossak_displacement_scheme.hpp"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedPseudoStaticDisplacementScheme
* @ingroup KratosCore
* @brief This is a pseudo-static scheme
* @details For pseudo–static strategy: calculate the constant matrices D = Beta * M, "set" M = 0 after initializing the damping matrix
* @note Based on Riccardo Rossi PhD Thesis: Light weight Structures: Structural Analysis and Coupling Issues
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedPseudoStaticDisplacementScheme
: public ResidualBasedBossakDisplacementScheme<TSparseSpace,TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedPseudoStaticDisplacementScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedBossakDisplacementScheme<TSparseSpace,TDenseSpace> DerivedBaseType;
typedef typename BaseType::LocalSystemComponents LocalSystemComponentsType;
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The pseudo static scheme (parameters)
* @param ThisParameters Parameters with the Rayleigh variable
*/
explicit ResidualBasedPseudoStaticDisplacementScheme(Parameters ThisParameters)
    : DerivedBaseType(0.0),
      // NOTE(review): NODAL_MAUX is only a placeholder initializer for the
      // variable reference member; it is replaced below after the parameters
      // are validated.
      mRayleighBeta(NODAL_MAUX)
{
    // Validate default parameters
    Parameters default_parameters = Parameters(R"(
    {
        "name"                   : "ResidualBasedPseudoStaticDisplacementScheme",
        "rayleigh_beta_variable" : "RAYLEIGH_BETA"
    })" );
    ThisParameters.ValidateAndAssignDefaults(default_parameters);

    // Resolve the configured Rayleigh-beta variable name to the registered
    // Variable<double> instance
    mRayleighBeta = KratosComponents<Variable<double>>::Get(ThisParameters["rayleigh_beta_variable"].GetString());
}
/**
* @brief Default constructor. The pseudo static scheme
*/
explicit ResidualBasedPseudoStaticDisplacementScheme(const Variable<double>& RayleighBetaVariable)
    // Base Bossak scheme constructed with zero alpha; the supplied variable
    // is used later to look up the Rayleigh-beta value in the process info.
    : DerivedBaseType(0.0),
      mRayleighBeta(RayleighBetaVariable)
{
}
/** Copy Constructor.
*/
// Copy constructor: copies the base Bossak state and the Rayleigh-beta
// variable reference.
explicit ResidualBasedPseudoStaticDisplacementScheme(ResidualBasedPseudoStaticDisplacementScheme& rOther)
    : DerivedBaseType(rOther),
      mRayleighBeta(rOther.mRayleighBeta)
{
}
/**
* Clone
*/
// Creates a heap-allocated copy of this scheme via the copy constructor.
BaseTypePointer Clone() override
{
    return BaseTypePointer( new ResidualBasedPseudoStaticDisplacementScheme(*this) );
}
/** Destructor.
*/
// Destructor: nothing to release; base class handles its own resources.
~ResidualBasedPseudoStaticDisplacementScheme() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution
* @details Incremental update within newton iteration. It updates the state variables at the end of the time step u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY;

    // Apply the Newton increment rDx to the solution-step values of rDofSet
    DerivedBaseType::mpDofUpdater->UpdateDofs(rDofSet, rDx);

    // Updating time derivatives (nodally for efficiency)
    const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
    const auto it_node_begin = rModelPart.Nodes().begin();

    // Scratch vector; privatized per OpenMP thread below
    array_1d<double, 3 > delta_displacement;

    #pragma omp parallel for private(delta_displacement)
    for(int i = 0; i < num_nodes; ++i) {
        auto it_node = it_node_begin + i;

        noalias(delta_displacement) = it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);

        array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
        const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);

        // v_{n+1} = c1 * (u_{n+1} - u_n) - c4 * v_n using the Bossak
        // integration coefficients; acceleration is deliberately not
        // updated (pseudo-static scheme).
        noalias(r_current_velocity) = (DerivedBaseType::mBossak.c1 * delta_displacement - DerivedBaseType::mBossak.c4 * r_previous_velocity);
    }

    KRATOS_CATCH( "" );
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step x = xold + vold * Dt
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY;

    // Process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    // Delta time
    const double delta_time = r_current_process_info[DELTA_TIME];

    // Updating time derivatives (nodally for efficiency)
    const auto it_node_begin = rModelPart.Nodes().begin();
    const int num_nodes = static_cast<int>(rModelPart.NumberOfNodes());

    // Auxiliar variables
    const array_1d<double, 3> zero_array = ZeroVector(3);
    array_1d<double, 3 > delta_displacement = zero_array;
    bool predicted_x, predicted_y, predicted_z;

    // Getting position. DoF positions are queried once from the first node;
    // -1 marks a DoF type that is not present in the model part.
    const int disppos_x = it_node_begin->HasDofFor(DISPLACEMENT_X) ? it_node_begin->GetDofPosition(DISPLACEMENT_X) : -1;
    const int velpos_x = it_node_begin->HasDofFor(VELOCITY_X) ? it_node_begin->GetDofPosition(VELOCITY_X) : -1;
    const int disppos_y = it_node_begin->HasDofFor(DISPLACEMENT_Y) ? it_node_begin->GetDofPosition(DISPLACEMENT_Y) : -1;
    const int velpos_y = it_node_begin->HasDofFor(VELOCITY_Y) ? it_node_begin->GetDofPosition(VELOCITY_Y) : -1;
    const int disppos_z = it_node_begin->HasDofFor(DISPLACEMENT_Z) ? it_node_begin->GetDofPosition(DISPLACEMENT_Z) : -1;
    const int velpos_z = it_node_begin->HasDofFor(VELOCITY_Z) ? it_node_begin->GetDofPosition(VELOCITY_Z) : -1;

    // NOTE: delta_displacement is private (hence uninitialized per thread),
    // but every component is written before it is read in each branch that
    // uses it.
    #pragma omp parallel for private(delta_displacement, predicted_x, predicted_y, predicted_z)
    for(int i = 0; i < num_nodes; ++i) {
        auto it_node = it_node_begin + i;

        predicted_x = false;
        predicted_y = false;
        predicted_z = false;

        //Predicting: r_current_displacement = r_previous_displacement + r_previous_velocity * delta_time;
        //ATTENTION::: the prediction is performed only on free nodes

        const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);
        const array_1d<double, 3>& r_previous_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
        array_1d<double, 3>& r_current_acceleration = it_node->FastGetSolutionStepValue(ACCELERATION);
        array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
        array_1d<double, 3>& r_current_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT);

        // X component: if the velocity DoF is fixed, back out the consistent
        // displacement increment from the Bossak relation; otherwise predict
        // the free displacement from the previous velocity.
        if (velpos_x > -1) {
            if (it_node->GetDof(VELOCITY_X, velpos_x).IsFixed()) {
                delta_displacement[0] = (r_current_velocity[0] + DerivedBaseType::mBossak.c4 * r_previous_velocity[0])/DerivedBaseType::mBossak.c1;
                r_current_displacement[0] = r_previous_displacement[0] + delta_displacement[0];
                predicted_x = true;
            }
        }
        if (disppos_x > -1 && !predicted_x) {
            // NOTE(review): the inner !predicted_x repeats the outer guard
            if (!it_node->GetDof(DISPLACEMENT_X, disppos_x).IsFixed() && !predicted_x) {
                r_current_displacement[0] = r_previous_displacement[0] + delta_time * r_previous_velocity[0];
            }
        }

        // Y component (same logic as X)
        if (velpos_y > -1) {
            if (it_node->GetDof(VELOCITY_Y, velpos_y).IsFixed()) {
                delta_displacement[1] = (r_current_velocity[1] + DerivedBaseType::mBossak.c4 * r_previous_velocity[1])/DerivedBaseType::mBossak.c1;
                r_current_displacement[1] = r_previous_displacement[1] + delta_displacement[1];
                predicted_y = true;
            }
        }
        if (disppos_y > -1 && !predicted_y) {
            if (!it_node->GetDof(DISPLACEMENT_Y, disppos_y).IsFixed() && !predicted_y) {
                r_current_displacement[1] = r_previous_displacement[1] + delta_time * r_previous_velocity[1];
            }
        }

        // Z component (same logic as X)
        if (velpos_z > -1) {
            if (it_node->GetDof(VELOCITY_Z, velpos_z).IsFixed()) {
                delta_displacement[2] = (r_current_velocity[2] + DerivedBaseType::mBossak.c4 * r_previous_velocity[2])/DerivedBaseType::mBossak.c1;
                r_current_displacement[2] = r_previous_displacement[2] + delta_displacement[2];
                predicted_z = true;
            }
        }
        if (disppos_z > -1 && !predicted_z) {
            if (!it_node->GetDof(DISPLACEMENT_Z, disppos_z).IsFixed() && !predicted_z) {
                r_current_displacement[2] = r_previous_displacement[2] + delta_time * r_previous_velocity[2];
            }
        }

        // Updating time derivatives: acceleration is zeroed (pseudo-static)
        // and the velocity is carried over from the previous step.
        noalias(r_current_acceleration) = zero_array;
        noalias(r_current_velocity) = r_previous_velocity;
    }

    KRATOS_CATCH( "" );
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Returns a string identifying this scheme type.
std::string Info() const override
{
    static const std::string scheme_name = "ResidualBasedPseudoStaticDisplacementScheme";
    return scheme_name;
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info() << ". Considering the following damping variable " << mRayleighBeta;
}
///@}
///@name Friends
///@{
protected:
///@}
///@name Static Member Variables
///@{
///@}
///@name Protected Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief It adds the dynamic LHS contribution of the elements D*c1 + K
 * @details If no (non-trivial) damping matrix is provided, a mass-proportional
 * Rayleigh term beta*M*c1 is added instead, with beta read from the ProcessInfo
 * through the configured damping variable.
 * @param rLHSContribution The dynamic contribution for the LHS
 * @param rD The damping matrix
 * @param rM The mass matrix
 * @param rCurrentProcessInfo The current process info instance
 */
void AddDynamicsToLHS(
    LocalSystemMatrixType& rLHSContribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Adding damping contribution
    if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) // if D matrix declared
        noalias(rLHSContribution) += rD * DerivedBaseType::mBossak.c1;
    else if (rM.size1() != 0) {
        // No explicit damping: mass-proportional Rayleigh damping fallback
        const double beta = rCurrentProcessInfo[mRayleighBeta];
        noalias(rLHSContribution) += rM * beta * DerivedBaseType::mBossak.c1;
    }
}
/**
 * @brief It adds the dynamic RHS contribution of the elements b - D*v
 * @details Uses the element's first derivatives vector (velocities). When no
 * damping matrix is available, a mass-proportional Rayleigh term beta*M*v is
 * subtracted instead.
 * @param pElement The element to compute
 * @param rRHSContribution The dynamic contribution for the RHS
 * @param rD The damping matrix
 * @param rM The mass matrix
 * @param rCurrentProcessInfo The current process info instance
 */
void AddDynamicsToRHS(
    Element::Pointer pElement,
    LocalSystemVectorType& rRHSContribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Per-thread index into the shared workspace vectors
    const std::size_t this_thread = OpenMPUtils::ThisThread();

    // Adding damping contribution: RHS -= D * v
    if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) {
        pElement->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
        noalias(rRHSContribution) -= prod(rD, DerivedBaseType::mVector.v[this_thread]);
    } else if (rM.size1() != 0) {
        // Rayleigh fallback: RHS -= beta * M * v
        const double beta = rCurrentProcessInfo[mRayleighBeta];
        pElement->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
        noalias(rRHSContribution) -= beta * prod(rM, DerivedBaseType::mVector.v[this_thread]);
    }
}
/**
 * @brief It adds the dynamic RHS contribution of the condition b - M*a - D*v
 * @details Mirrors the element overload: subtracts D*v when a damping matrix
 * is present, otherwise subtracts the Rayleigh term beta*M*v.
 * @param pCondition The condition to compute
 * @param rRHSContribution The dynamic contribution for the RHS
 * @param rD The damping matrix
 * @param rM The mass matrix
 * @param rCurrentProcessInfo The current process info instance
 */
void AddDynamicsToRHS(
    Condition::Pointer pCondition,
    LocalSystemVectorType& rRHSContribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Per-thread index into the shared workspace vectors
    const std::size_t this_thread = OpenMPUtils::ThisThread();

    // Adding damping contribution: RHS -= D * v
    if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) {
        pCondition->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
        noalias(rRHSContribution) -= prod(rD, DerivedBaseType::mVector.v[this_thread]);
    } else if (rM.size1() != 0) {
        // Rayleigh fallback: RHS -= beta * M * v
        const double beta = rCurrentProcessInfo[mRayleighBeta];
        pCondition->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
        noalias(rRHSContribution) -= beta * prod(rM, DerivedBaseType::mVector.v[this_thread]);
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Variable<double> mRayleighBeta; /// The Rayleigh Beta variable
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedPseudoStaticDisplacementScheme */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME defined */
|
mkldnn_batch_norm-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mkldnn_batch_norm.cc
* \brief
* \author Tao Lv
*/
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
#if MXNET_USE_MKLDNN == 1
#include <vector>
#include <utility>
#include <mkldnn.hpp>
#include "../batch_norm-inl.h"
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
namespace op {
typedef mkldnn::batch_normalization_forward::primitive_desc t_bn_f_pdesc;
typedef mkldnn::batch_normalization_forward::desc t_bn_f_desc;
typedef mkldnn::batch_normalization_backward::primitive_desc t_bn_b_pdesc;
typedef mkldnn::batch_normalization_backward::desc t_bn_b_desc;
using mkldnn::use_global_stats;
using mkldnn::use_scale_shift;
using mkldnn::forward_training;
using mkldnn::forward_inference;
// Derive the MKLDNN batch-norm flag mask from the tensors provided:
// three inputs (data, gamma, beta) enable scale/shift; supplied
// mean/variance aux states in inference mode enable global stats.
inline static unsigned _GetFlags(const std::vector<NDArray> &in_data,
                                 const std::vector<NDArray> &aux_states,
                                 const BatchNormParam &param, bool is_train) {
  unsigned flags = 0U;
  const bool has_gamma_beta = (in_data.size() == 3U);
  if (has_gamma_beta) {
    flags |= use_scale_shift;
  }
  // aux_states[0]: inMean, aux_states[1]: inVariance
  const bool has_moving_stats = (aux_states.size() == 2U);
  if (has_moving_stats && !is_train) {
    flags |= use_global_stats;
  }
  return flags;
}
// Build a forward primitive descriptor for batch-norm over the given data
// memory, selecting the training or inference propagation kind.
template <typename DType>
inline static t_bn_f_pdesc _GetFwd(const mkldnn::memory &data_mem,
                                   bool is_train,
                                   DType eps,
                                   unsigned flags) {
  auto md = data_mem.get_primitive_desc().desc();
  auto engine = CpuEngine::Get()->get_engine();
  auto prop = is_train ? forward_training : forward_inference;
  t_bn_f_desc fwd_desc(prop, md, eps, flags);
  return t_bn_f_pdesc(fwd_desc, engine);
}
// Build a backward primitive descriptor for batch-norm. MKLDNN requires the
// matching forward (training) primitive descriptor as a hint, so one is
// constructed from the same data memory.
template <typename DType>
inline static t_bn_b_pdesc _GetBwd(const mkldnn::memory &data_mem,
                                   const mkldnn::memory &diff_mem,
                                   DType eps,
                                   unsigned flags) {
  auto data_mpd = data_mem.get_primitive_desc();
  auto data_md = data_mpd.desc();
  auto diff_mpd = diff_mem.get_primitive_desc();
  auto diff_md = diff_mpd.desc();
  auto engine = CpuEngine::Get()->get_engine();

  t_bn_b_desc bnBwd_desc(mkldnn::prop_kind::backward, diff_md, data_md, eps, flags);
  return t_bn_b_pdesc(bnBwd_desc, engine, _GetFwd(data_mem, true, eps, flags));
}
typedef ParamOpSign<BatchNormParam> MKLDNNBNSignature;
// Caches an MKLDNN forward batch-norm primitive together with the memory
// objects it is wired to. The memory objects are created lazily and then
// re-pointed at new data handles on each call, so the primitive itself can
// be reused across invocations with the same shapes/parameters.
class MKLDNNBNForward {
  std::shared_ptr<const mkldnn::memory> data_m;    // input data
  std::shared_ptr<const mkldnn::memory> weight_m;  // packed [gamma; beta], owned here
  std::shared_ptr<const mkldnn::memory> out_m;     // output
  std::shared_ptr<const mkldnn::memory> mean_m;    // mean (input in inference, output in training)
  std::shared_ptr<const mkldnn::memory> var_m;     // variance (same role as mean)
  std::shared_ptr<mkldnn::batch_normalization_forward> fwd;
  bool is_train;
  t_bn_f_pdesc pd;

 public:
  MKLDNNBNForward(const t_bn_f_pdesc &_pd, bool is_train): pd(_pd) {
    // The weights buffer is allocated and owned by this object; all other
    // memory objects wrap caller-provided handles set via SetDataHandle().
    weight_m.reset(new mkldnn::memory(pd.weights_primitive_desc()));
    this->is_train = is_train;
  }

  const mkldnn::memory &GetWeight() const {
    return *weight_m;
  }

  const t_bn_f_pdesc &GetPd() const {
    return pd;
  }

  const mkldnn::memory &GetMean() const {
    return *mean_m;
  }

  const mkldnn::memory &GetVar() const {
    return *var_m;
  }

  // Bind the primitive's memory objects to the given arrays' data handles,
  // creating the memory objects (and the primitive itself) on first use.
  void SetDataHandle(const NDArray &data, const NDArray &mean,
                     const NDArray &var, const mkldnn::memory &out) {
    auto _data = data.GetMKLDNNData();
    if (data_m) {
      data_m->set_data_handle(_data->get_data_handle());
    } else {
      data_m.reset(new mkldnn::memory(_data->get_primitive_desc(),
                                      _data->get_data_handle()));
    }
    if (out_m) {
      out_m->set_data_handle(out.get_data_handle());
    } else {
      out_m.reset(new mkldnn::memory(out.get_primitive_desc(),
                                     out.get_data_handle()));
    }
    auto mean_ptr = mean.data().dptr_;
    if (mean_m) {
      mean_m->set_data_handle(mean_ptr);
    } else {
      mean_m.reset(new mkldnn::memory(pd.mean_primitive_desc(),
                                      mean_ptr));
    }
    auto var_ptr = var.data().dptr_;
    if (var_m) {
      var_m->set_data_handle(var_ptr);
    } else {
      var_m.reset(new mkldnn::memory(pd.variance_primitive_desc(),
                                     var_ptr));
    }

    if (fwd == nullptr) {
      // Inference consumes mean/var as inputs (primitive::at); training
      // produces them as outputs — hence the two different argument orders.
      if (!is_train)
        fwd.reset(new mkldnn::batch_normalization_forward(
            pd, *data_m, mkldnn::primitive::at(*mean_m),
            mkldnn::primitive::at(*var_m), *weight_m, *out_m));
      else
        fwd.reset(new mkldnn::batch_normalization_forward(
            pd, mkldnn::primitive::at(*data_m),
            mkldnn::primitive::at(*weight_m), *out_m,
            *mean_m, *var_m));
    }
  }

  const mkldnn::batch_normalization_forward &GetFwd() const {
    return *fwd;
  }
};
// Look up (or create and cache) a forward primitive wrapper in a
// thread-local map keyed by the op parameters, train/inference flag and
// the input array's signature.
template<typename DType>
static MKLDNNBNForward &GetBNForward(const BatchNormParam& param,
                                     const OpContext &ctx, const NDArray &in_data,
                                     unsigned flags) {
  static thread_local std::unordered_map<MKLDNNBNSignature, MKLDNNBNForward, OpHash> fwds;
  MKLDNNBNSignature key(param);
  key.AddSign(ctx.is_train);
  key.AddSign(in_data);

  auto it = fwds.find(key);
  if (it == fwds.end()) {
    // First time for this configuration: build descriptor + wrapper and cache.
    auto fwd_pd = _GetFwd(*in_data.GetMKLDNNData(), ctx.is_train,
                          (DType) param.eps, flags);
    MKLDNNBNForward fwd(fwd_pd, ctx.is_train);
    auto ins_ret = fwds.insert(std::pair<MKLDNNBNSignature, MKLDNNBNForward>(
        key, fwd));
    CHECK(ins_ret.second);
    it = ins_ret.first;
  }
  return it->second;
}
// Forward batch-norm via MKLDNN. Packs gamma/beta into the primitive's
// weights buffer (forcing gamma to 1 when fix_gamma), runs the primitive and
// fills the mean/var outputs. NOTE: the kVar output holds the INVERSE
// standard deviation, not the variance, to match the reference CPU
// implementation (batch_norm.cc: L164).
template <typename DType>
void MKLDNNBatchNormForward(const OpContext &ctx, const BatchNormParam &param,
                            const std::vector<NDArray> &in_data,
                            const std::vector<OpReqType> &req,
                            const std::vector<NDArray> &out_data,
                            const std::vector<NDArray> &aux_states) {
  TmpMemMgr::Get()->Init(ctx.requested[batchnorm::kTempSpace]);
  unsigned flags = _GetFlags(in_data, aux_states, param, ctx.is_train);
  const NDArray &data = in_data[batchnorm::kData];

  auto &fwd = GetBNForward<DType>(param, ctx, data, flags);
  const NDArray &out = out_data[batchnorm::kOut];

  // for output memory
  auto out_mem = const_cast<NDArray &>(out).CreateMKLDNNData(fwd.GetPd().dst_primitive_desc());

  // mxnet will always use scale shift.
  // But if fix_gamma is true, then all scale elements will be set to 1.0f
  if (flags & use_scale_shift) {
    const NDArray &gamma = in_data[batchnorm::kGamma];
    const NDArray &beta = in_data[batchnorm::kBeta];
    CHECK_EQ(gamma.storage_type(), mxnet::kDefaultStorage);
    CHECK_EQ(beta.storage_type(), mxnet::kDefaultStorage);

    const mkldnn::memory &weight_mem = fwd.GetWeight();
    DType* weight_buf = reinterpret_cast<DType *>(weight_mem.get_data_handle());

    nnvm::dim_t channels_ = data.shape()[1];
    // weights layout is [gamma[0..C-1], beta[0..C-1]]
    CHECK(weight_mem.get_primitive_desc().get_size() == channels_ * sizeof(DType) * 2);
    DType* weight_ptr = gamma.data().dptr<DType>();
    DType* bias_ptr = beta.data().dptr<DType>();
    if (!param.fix_gamma) {
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        weight_buf[i] = weight_ptr[i];
        weight_buf[channels_ + i] = bias_ptr[i];  // bias
      }
    } else if (IsBNWriting(req[batchnorm::kGamma])) {
      // fix_gamma with a writable gamma: force the scale to 1 in the
      // primitive's buffer AND write the 1s back into the gamma array.
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        weight_buf[i] = (DType)1.0f;
        weight_ptr[i] = (DType)1.0f;
        weight_buf[channels_ + i] = bias_ptr[i];  // bias
      }
    } else {
      // fix_gamma, gamma not writable: only the primitive's buffer sees 1s.
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        weight_buf[i] = (DType)1.0f;
        weight_buf[channels_ + i] = bias_ptr[i];  // bias
      }
    }

    if (!ctx.is_train) {
      // Inference: mean/var outputs mirror the moving stats (var converted
      // to inverse std) and the moving stats feed the primitive as inputs.
      DType* omean = out_data[batchnorm::kMean].data().dptr<DType>();
      DType* ovar = out_data[batchnorm::kVar].data().dptr<DType>();
      DType* inmean = aux_states[batchnorm::kMovingMean].data().dptr<DType>();
      DType* invar = aux_states[batchnorm::kMovingVar].data().dptr<DType>();
      // to align with origin implmentation: batch_norm.cc: L164
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        omean[i] = inmean[i];
        ovar[i] = VARIANCE_TO_INVSTD(invar[i], param.eps);
      }

      fwd.SetDataHandle(data, aux_states[batchnorm::kMovingMean],
                        aux_states[batchnorm::kMovingVar],
                        *out_mem);
      MKLDNNStream::Get()->RegisterPrim(fwd.GetFwd());
      MKLDNNStream::Get()->Submit();
    } else {  // training
      const NDArray &outMean = out_data[batchnorm::kMean];
      const NDArray &outVar = out_data[batchnorm::kVar];
      DType* omean = outMean.data().dptr<DType>();
      DType* ovar = outVar.data().dptr<DType>();

      fwd.SetDataHandle(data, outMean, outVar, *out_mem);
      MKLDNNStream::Get()->RegisterPrim(fwd.GetFwd());
      MKLDNNStream::Get()->Submit();
      // After execution the primitive has written batch mean/variance;
      // convert the variance output to inverse std in-place.
      DType* mean_mem_ptr = reinterpret_cast<DType*>(fwd.GetMean().get_data_handle());
      DType* var_mem_ptr = reinterpret_cast<DType*>(fwd.GetVar().get_data_handle());
#pragma omp parallel for
      for (int i = 0; i < channels_; i++) {
        omean[i] = mean_mem_ptr[i];
        ovar[i] = VARIANCE_TO_INVSTD(var_mem_ptr[i], param.eps);
      }
    }
  } else {  // no input gamma and beta
    LOG(FATAL) << "MKLDNN batch normalization: should not reach here ...";
  }
}
// Backward batch-norm via MKLDNN. Computes the data gradient and, when
// scale/shift is in use, the gamma/beta gradients. In training mode it also
// updates the moving mean/variance aux states in-place using the momentum.
// NOTE: the forward pass stored INVERSE std in out_data[kVar]; it is
// converted back to a variance here before feeding the primitive.
template <typename DType>
void MKLDNNBatchNormBackward(const OpContext &ctx, const BatchNormParam &param,
                             const std::vector<NDArray> &out_grad,
                             const std::vector<NDArray> &in_data,
                             const std::vector<NDArray> &out_data,
                             const std::vector<OpReqType> &req,
                             const std::vector<NDArray> &in_grad,
                             const std::vector<NDArray> &aux_states) {
  TmpMemMgr::Get()->Init(ctx.requested[batchnorm::kTempSpace]);
  CHECK_EQ(out_grad.size(), 1U);
  CHECK_EQ(in_data.size(), 3U);
  CHECK_EQ(out_data.size(), 3U);
  CHECK_EQ(in_grad.size(), 3U);
  unsigned flags = _GetFlags(in_data, aux_states, param, ctx.is_train);

  const NDArray &data = in_data[batchnorm::kData];
  const NDArray &diff = out_grad[batchnorm::kOut];
  const NDArray &gradIn = in_grad[batchnorm::kData];
  const NDArray &moving_mean = aux_states[batchnorm::kMovingMean];
  const NDArray &moving_var = aux_states[batchnorm::kMovingVar];
  const NDArray &out_mean = out_data[batchnorm::kMean];
  const NDArray &out_var = out_data[batchnorm::kVar];

  CHECK(out_mean.IsDefaultData());
  CHECK(out_var.IsDefaultData());
  CHECK(moving_mean.IsDefaultData());
  CHECK(moving_var.IsDefaultData());

  auto data_mem = data.GetMKLDNNData();
  auto diff_mem = diff.GetMKLDNNData();
  // MKLDNN batchnorm should run on special layouts. If one of them isn't, we
  // should reorder them.
  if (data.IsDefaultData())
    data_mem = data.GetMKLDNNDataReorder(diff_mem->get_primitive_desc());
  else if (diff.IsDefaultData())
    diff_mem = diff.GetMKLDNNDataReorder(data_mem->get_primitive_desc());
  auto bwd_pd = _GetBwd(*data_mem, *diff_mem, param.eps, flags);
  auto gradi_mem = const_cast<NDArray &>(gradIn).CreateMKLDNNData(data_mem->get_primitive_desc());

  if (flags & use_scale_shift) {
    const NDArray &gamma = in_data[batchnorm::kGamma];
    const NDArray &beta = in_data[batchnorm::kBeta];
    // TODO(tao): how to reuse this memory?
    std::shared_ptr<const mkldnn::memory> weight_mem(
        new mkldnn::memory(bwd_pd.weights_primitive_desc()));

    DType* weight_buf = reinterpret_cast<DType *>(weight_mem->get_data_handle());
    nnvm::dim_t channels_ = data.shape()[1];
    // Pack gamma (or 1s when fix_gamma) followed by beta into the buffer.
    for (int i = 0; i < channels_; i++) {
      if (!param.fix_gamma)
        weight_buf[i] = (gamma.data().dptr<DType>())[i];   // weight
      else
        weight_buf[i] = (DType)1.0f;
    }

    for (int i = 0; i < channels_; i++) {
      weight_buf[channels_ + i] = (beta.data().dptr<DType>())[i];  // bias
    }

    std::shared_ptr<const mkldnn::memory> gradw_mem(
        new mkldnn::memory(bwd_pd.diff_weights_primitive_desc()));
    // training but no input mean and variance
    if (ctx.is_train && !param.use_global_stats) {
      DType* moving_mean_ptr = reinterpret_cast<DType *>(moving_mean.data().dptr<DType>());
      DType* moving_var_ptr = reinterpret_cast<DType *>(moving_var.data().dptr<DType>());
      DType* out_mean_ptr = reinterpret_cast<DType *>(out_mean.data().dptr<DType>());
      DType* out_var_ptr = reinterpret_cast<DType *>(out_var.data().dptr<DType>());
      mkldnn::memory var_mem(bwd_pd.variance_primitive_desc());
      DType *tmp_var_ptr = reinterpret_cast<DType *>(var_mem.get_data_handle());

      // Update moving stats in-place; out_var holds inverse std from the
      // forward pass, so convert back to a variance for MKLDNN (tmp_var_ptr).
      DType minus_mom = (1.0f - param.momentum);
      for (int i = 0; i < channels_; i++) {
        moving_mean_ptr[i] = moving_mean_ptr[i] * param.momentum +
            out_mean_ptr[i] * minus_mom;
        float variance = INVSTD_TO_VARIANCE(out_var_ptr[i], param.eps);
        tmp_var_ptr[i] = variance;
        moving_var_ptr[i] = moving_var_ptr[i] * param.momentum +
            variance * minus_mom;
      }

      std::shared_ptr<const mkldnn::memory> out_mean_mem(
          new mkldnn::memory(bwd_pd.mean_primitive_desc(), out_mean_ptr));
      std::shared_ptr<const mkldnn::memory> out_var_mem(
          new mkldnn::memory(bwd_pd.variance_primitive_desc(), out_var_ptr));

      auto bn_bwd = mkldnn::batch_normalization_backward(bwd_pd,
                                                         *data_mem,
                                                         mkldnn::primitive::at(*out_mean_mem),
                                                         mkldnn::primitive::at(var_mem),
                                                         *diff_mem,
                                                         *weight_mem,
                                                         *gradi_mem,
                                                         *gradw_mem);

      MKLDNNStream::Get()->RegisterPrim(bn_bwd);
      MKLDNNStream::Get()->Submit();
    } else {
      // Inference / global stats: feed the moving mean/variance directly.
      std::shared_ptr<const mkldnn::memory> imean_mem(
          new mkldnn::memory(bwd_pd.mean_primitive_desc(),
                             moving_mean.data().dptr<DType>()));
      std::shared_ptr<const mkldnn::memory> ivar_mem(
          new mkldnn::memory(bwd_pd.variance_primitive_desc(),
                             moving_var.data().dptr<DType>()));
      auto bn_bwd = mkldnn::batch_normalization_backward(bwd_pd,
                                                         *data_mem,
                                                         mkldnn::primitive::at(*imean_mem),
                                                         mkldnn::primitive::at(*ivar_mem),
                                                         *diff_mem,
                                                         *weight_mem,
                                                         *gradi_mem,
                                                         *gradw_mem);

      MKLDNNStream::Get()->RegisterPrim(bn_bwd);
      MKLDNNStream::Get()->Submit();
    }

    // copy data from gradw_mem to in_grad[1] and in_grad[2]
    DType* gw_buf = reinterpret_cast<DType *>(gradw_mem->get_data_handle());
    for (int i = 0; i < channels_; i++) {
      if (!param.fix_gamma)
        (in_grad[1].data().dptr<DType>())[i] = gw_buf[i];
      else
        (in_grad[1].data().dptr<DType>())[i] = 0.0f;   // fixed gamma: no gradient
    }

    for (int i = 0; i < channels_; i++) {
      (in_grad[2].data().dptr<DType>())[i] = gw_buf[i + channels_];
    }
  } else {
    LOG(FATAL) << "MKLDNN batch normalization backward: should not reach here ...";
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_MKLDNN
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
|
ams.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax
*
* Relaxation on the ParCSR matrix A with right-hand side f and
* initial guess u. Possible values for relax_type are:
*
* 1 = l1-scaled (or weighted) Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
* 3 = Kaczmarz
* 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
* x = BoomerAMG relaxation with relax_type = |x|
* (16 = Cheby)
*
* The default value of relax_type is 2.
*--------------------------------------------------------------------------*/
#if defined(HYPRE_USING_CUDA)
/* Thrust binary functor used when assembling l1 norms on the device:
 * yields y when x <= (4/3)*y, otherwise keeps x. */
struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex>
{
   __host__ __device__
   HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const
   {
      return x <= 4.0/3.0 * y ? y : x;
   }
};
#endif
HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2)) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;

   HYPRE_Complex *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Complex *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Complex *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
         /* Temporarily disable per-op stream sync; one sync happens below */
         HYPRE_Int sync_stream = hypre_HandleCudaComputeStreamSync(hypre_handle());
         hypre_HandleCudaComputeStreamSync(hypre_handle()) = 0;
#endif

         /* v = w (f - A u) */
         hypre_ParVectorCopy(f, v);
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);

#if defined(HYPRE_USING_CUDA)
         hypreDevice_IVAXPY(num_rows, l1_norms, v_data, u_data);
#else /* #if defined(HYPRE_USING_CUDA) */
         HYPRE_Int i;
         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms)
#endif
         for (i = 0; i < num_rows; i++)
         {
            u_data[i] += v_data[i] / l1_norms[i];
         }
#endif /* #if defined(HYPRE_USING_CUDA) */

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
         hypre_HandleCudaComputeStreamSync(hypre_handle()) = sync_stream;
         hypre_SyncCudaComputeStream(hypre_handle());
#endif
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);

            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR */
         {
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* strictly lower-triangular part contributes to dif */
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* strictly upper-triangular part contributes to dif */
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }

         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);

            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         /* Forward local pass: project onto each row's hyperplane in turn */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A,
                                    f,
                                    max_eig_est,
                                    min_eig_est,
                                    cheby_fraction, cheby_order, 1,
                                    0, u, v, z);
         }
         else
         {
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
         }
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorInRangeOf
 *
 * Create and initialize a vector distributed like the rows of A, i.e. a
 * vector in the range of the matrix. The returned vector owns its data but
 * shares A's row partitioning.
 *--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *range_vec =
      hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                            hypre_ParCSRMatrixGlobalNumRows(A),
                            hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(range_vec);

   hypre_ParVectorOwnsData(range_vec) = 1;
   hypre_ParVectorOwnsPartitioning(range_vec) = 0;

   return range_vec;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorInDomainOf
 *
 * Create and initialize a vector distributed like the columns of A, i.e. a
 * vector in the domain of the matrix. The returned vector owns its data but
 * shares A's column partitioning.
 *--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *domain_vec =
      hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                            hypre_ParCSRMatrixGlobalNumCols(A),
                            hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(domain_vec);

   hypre_ParVectorOwnsData(domain_vec) = 1;
   hypre_ParVectorOwnsPartitioning(domain_vec) = 0;

   return domain_vec;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. The block vector is interleaved:
 * &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int i, d, local_size;
   HYPRE_Real *interleaved, *component[3];

   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   interleaved = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
   {
      component[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
   }

   /* De-interleave: component d gets every dim-th entry starting at d */
   for (i = 0; i < local_size; i++)
   {
      for (d = 0; d < dim; d++)
      {
         component[d][i] = interleaved[dim*i + d];
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, interleaving them so that
 * &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int i, d, local_size;
   HYPRE_Real *interleaved, *component[3];

   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   interleaved = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
   {
      component[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
   }

   /* Interleave: entry i of component d lands at position dim*i + d */
   for (i = 0; i < local_size; i++)
   {
      for (d = 0; d < dim; d++)
      {
         interleaved[dim*i + d] = component[d][i];
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors with interleaved components:
 * &x[i] = [x_0[i],...,x_{dim-1}[i]], dim <= 3.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int d, dim;
   hypre_ParVector *b_[3];
   hypre_ParVector *x_[3];

   /* Number of interleaved components stored in the block vectors
      (the previous dead initialization "dim = 1" was removed) */
   dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   if (dim == 1)
   {
      /* Scalar system: solve directly, no split/gather needed */
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   /* Split the interleaved vectors into dim scalar vectors */
   for (d = 0; d < dim; d++)
   {
      b_[d] = hypre_ParVectorInRangeOf(A);
      x_[d] = hypre_ParVectorInRangeOf(A);
   }

   hypre_ParVectorBlockSplit(b, b_, dim);
   hypre_ParVectorBlockSplit(x, x_, dim);

   /* Solve each diagonal block with the same AMG solver */
   for (d = 0; d < dim; d++)
      hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);

   hypre_ParVectorBlockGather(x, x_, dim);

   for (d = 0; d < dim; d++)
   {
      hypre_ParVectorDestroy(b_[d]);
      hypre_ParVectorDestroy(x_[d]);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFixZeroRows
*
* For every zero row in the matrix: set the diagonal element to 1.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   HYPRE_Int row, k;
   HYPRE_Real row_norm;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (row = 0; row < num_rows; row++)
   {
      /* Accumulate the l1 norm over the diag and offd parts of the row. */
      row_norm = 0.0;
      for (k = A_diag_I[row]; k < A_diag_I[row+1]; k++)
      {
         row_norm += fabs(A_diag_data[k]);
      }
      if (num_cols_offd)
      {
         for (k = A_offd_I[row]; k < A_offd_I[row+1]; k++)
         {
            row_norm += fabs(A_offd_data[k]);
         }
      }

      if (row_norm > eps)
      {
         continue;
      }

      /* Zero row: overwrite it with the corresponding identity row. */
      for (k = A_diag_I[row]; k < A_diag_I[row+1]; k++)
      {
         A_diag_data[k] = (A_diag_J[k] == row) ? 1.0 : 0.0;
      }
      if (num_cols_offd)
      {
         for (k = A_offd_I[row]; k < A_offd_I[row+1]; k++)
         {
            A_offd_data[k] = 0.0;
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 );

   /* On the host with multiple threads, defer to the threaded variant. */
   if (exec == HYPRE_EXEC_HOST)
   {
      HYPRE_Int num_threads = hypre_NumThreads();
      if (num_threads > 1)
      {
         return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr);
      }
   }

   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1);
   HYPRE_MemoryLocation memory_location_tmp = exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
   HYPRE_Real *diag_tmp = NULL;
   HYPRE_Int *cf_marker_offd = NULL, *cf_marker_dev = NULL;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
      {
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
      {
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
      }

      /* Pack the locally-owned CF values that neighbors need. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, HYPRE_MEMORY_HOST, int_buf_data,
                                                    memory_location_tmp, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

      /* On the device we need a device copy of the marker; on the host the
         user array is used directly (do not free it in that case). */
      if (exec == HYPRE_EXEC_DEVICE)
      {
         cf_marker_dev = hypre_TAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE);
         hypre_TMemcpy(cf_marker_dev, cf_marker, HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      }
      else
      {
         cf_marker_dev = cf_marker;
      }
   }

   if (option == 1)
   {
      /* Set the l1 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, cf_marker_dev, cf_marker_dev, l1_norm, 1, 1.0, "set");
      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 2)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 3)
   {
      /* Set the l2 norm^2 of the diag part (CF markers intentionally unused) */
      hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set");
      /* Add the l2 norm^2 of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add");
      }
   }
   else if (option == 4)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
      hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp, memory_location_l1);
      /* Add the scaled l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 0.5, "add");
      }
      /* Truncate according to Remark 6.2 */
#if defined(HYPRE_USING_CUDA)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, l1_norm_op1() );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] <= 4.0/3.0 * diag_tmp[i])
            {
               l1_norm[i] = diag_tmp[i];
            }
         }
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      /* Set the diag element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0);
#if defined(HYPRE_USING_CUDA)
      if ( exec == HYPRE_EXEC_DEVICE)
      {
         thrust::identity<HYPRE_Complex> identity;
         HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 );
      }
      else
#endif
      {
         /* Guard against division by zero in the Jacobi relaxation. */
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] == 0.0)
            {
               l1_norm[i] = 1.0;
            }
         }
      }
      /* BUGFIX: free the CF-marker buffers before the early return; the
         original code leaked cf_marker_offd (and cf_marker_dev on device)
         on this path, skipping the cleanup done at the end of the routine. */
      if (exec == HYPRE_EXEC_DEVICE)
      {
         hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(cf_marker_offd, memory_location_tmp);
      *l1_norm_ptr = l1_norm;
      return hypre_error_flag;
   }

   /* Handle negative definite matrices */
   if (!diag_tmp)
   {
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
   }
   /* Set the diag element */
   hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0);
#if defined(HYPRE_USING_CUDA)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm, thrust::negate<HYPRE_Real>(),
                         is_negative<HYPRE_Real>() );
      //bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) );
      bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0, thrust::minimum<HYPRE_Real>() );
      if ( any_zero )
      {
         hypre_error_in_arg(1);
      }
   }
   else
#endif
   {
      /* Flip the sign of the norm on rows with a negative diagonal. */
      for (i = 0; i < num_rows; i++)
      {
         if (diag_tmp[i] < 0.0)
         {
            l1_norm[i] = -l1_norm[i];
         }
      }
      /* A zero l1 norm would make the smoother divide by zero — report it. */
      for (i = 0; i < num_rows; i++)
      {
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
      }
   }

   /* cf_marker_dev is an owned device copy only on the device path; on the
      host it aliases the caller's cf_marker and must not be freed. */
   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE);
   }
   hypre_TFree(cf_marker_offd, memory_location_tmp);
   hypre_TFree(diag_tmp, memory_location_tmp);

   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDiagRows
*
* For every row containing only a diagonal element: set it to d.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int row;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   for (row = 0; row < num_rows; row++)
   {
      HYPRE_Int first = A_diag_I[row];
      /* Row holds exactly one diag entry, and that entry is the diagonal. */
      HYPRE_Int diag_only  = (A_diag_I[row+1] == first + 1) && (A_diag_J[first] == row);
      /* No off-processor entries in this row either. */
      HYPRE_Int offd_empty = !num_cols_offd || (A_offd_I[row+1] == A_offd_I[row]);

      if (diag_only && offd_empty)
      {
         A_diag_data[first] = d;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSCreate
*
* Allocate the AMS solver structure.
*--------------------------------------------------------------------------*/
void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;
   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */
   ams_data -> dim = 3;                   /* 3D problem */
   ams_data -> maxit = 20;                /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;                /* convergence tolerance */
   ams_data -> print_level = 1;           /* print residual norm at each step */
   ams_data -> cycle_type = 1;            /* a 3-level multiplicative solver */
   ams_data -> A_relax_type = 2;          /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;         /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0;      /* damping parameter */
   ams_data -> A_omega = 1.0;             /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;         /* Cheby: order (1-4 are valid) */
   ams_data -> A_cheby_fraction = .3;     /* Cheby: fraction of spectrum to smooth */
   ams_data -> B_G_coarsen_type = 10;     /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;        /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;        /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;          /* strength threshold */
   ams_data -> B_G_interp_type = 0;       /* interpolation type */
   ams_data -> B_G_Pmax = 0;              /* max nonzero elements in interp. rows */
   ams_data -> B_Pi_coarsen_type = 10;    /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;       /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;       /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;         /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;      /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;             /* max nonzero elements in interp. rows */
   ams_data -> beta_is_zero = 0;          /* the problem has a mass term */
   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */
   /* Matrices and multigrid solvers (NULL/0 until set or built in Setup) */
   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   /* Vertex coordinates and edge-constant representation vectors */
   ams_data -> x    = NULL;
   ams_data -> y    = NULL;
   ams_data -> z    = NULL;
   ams_data -> Gx   = NULL;
   ams_data -> Gy   = NULL;
   ams_data -> Gz   = NULL;

   /* Temporary residual/correction work vectors used during Solve */
   ams_data -> r0   = NULL;
   ams_data -> g0   = NULL;
   ams_data -> r1   = NULL;
   ams_data -> g1   = NULL;
   ams_data -> r2   = NULL;
   ams_data -> g2   = NULL;

   /* Component interpolations and their coarse operators/solvers */
   ams_data -> Pix    = NULL;
   ams_data -> Piy    = NULL;
   ams_data -> Piz    = NULL;
   ams_data -> A_Pix  = NULL;
   ams_data -> A_Piy  = NULL;
   ams_data -> A_Piz  = NULL;
   ams_data -> B_Pix  = 0;
   ams_data -> B_Piy  = 0;
   ams_data -> B_Piz  = 0;

   /* Zero-conductivity region support */
   ams_data -> interior_nodes       = NULL;
   ams_data -> G0                   = NULL;
   ams_data -> A_G0                 = NULL;
   ams_data -> B_G0                 = 0;
   ams_data -> projection_frequency = 5;

   /* Smoother data */
   ams_data -> A_l1_norms    = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* Ownership flags: by default AMS owns Pi (built internally) but not
      the user-supplied coarse matrices A_G/A_Pi. */
   ams_data -> owns_Pi   = 1;
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}
/*--------------------------------------------------------------------------
* hypre_AMSDestroy
*
* Deallocate the AMS solver structure. Note that the input data (given
* through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Coarse "beta" space: matrix only if owned, AMG solver unless beta==0 */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   /* Coarse "alpha" space: Pi only if owned, A_Pi only if owned */
   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   /* Component interpolations and their operators/solvers */
   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   /* Temporary work vectors created in Setup */
   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);

   /* NOTE(review): destroying A when G0 is set looks suspicious, but it
      appears to rely on Setup replacing the user's A with an internally
      created matrix when interior_nodes/G0 are used — confirm against
      hypre_AMSSetup before changing. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   hypre_SeqVectorDestroy(ams_data -> A_l1_norms);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   if (ams_data)
   {
      hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDimension
*
* Set problem dimension (2 or 3). By default we assume dim = 3.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDimension(void *solver,
                                HYPRE_Int dim)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   /* Only 2D and 3D problems are supported; flag anything else.
      (The value is still stored, matching the original behavior.) */
   if (dim != 2 && dim != 3)
   {
      hypre_error_in_arg(2);
   }
   ams -> dim = dim;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetDiscreteGradient
*
* Set the discrete gradient matrix G.
* This function should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   /* Store the user's discrete gradient; AMS does not take ownership. */
   ((hypre_AMSData *) solver) -> G = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCoordinateVectors
*
* Set the x, y and z coordinates of the vertices in the mesh.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   /* Store the vertex coordinates; AMS does not take ownership. */
   ams -> x = x;
   ams -> y = y;
   ams -> z = z;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetEdgeConstantVectors
*
* Set the vectors Gx, Gy and Gz which give the representations of
* the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
* edge element basis.
*
* Either SetCoordinateVectors or SetEdgeConstantVectors should be
* called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   /* Store the edge representations of the constant fields;
      AMS does not take ownership. */
   ams -> Gx = Gx;
   ams -> Gy = Gy;
   ams -> Gz = Gz;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInterpolations
*
* Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
*
* This function is generally intended to be used only for high-order Nedelec
* discretizations (in the lowest order case, Pi is constructed internally in
* AMS from the discrete gradient matrix and the coordinates of the vertices),
* though it can also be used in the lowest-order case or for other types of
* discretizations (e.g. ones based on the second family of Nedelec elements).
*
* By definition, Pi is the matrix representation of the linear operator that
* interpolates (high-order) vector nodal finite elements into the (high-order)
* Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
* and similarly for Piy and Piz. Note that all these operators depend on the
* choice of the basis and degrees of freedom in the high-order spaces.
*
* The column numbering of Pi should be node-based, i.e. the x/y/z components of
* the first node (vertex or high-order dof) should be listed first, followed by
* the x/y/z components of the second node and so on (see the documentation of
* HYPRE_BoomerAMGSetDofFunc).
*
* If used, this function should be called before hypre_AMSSetup() and there is
* no need to provide the vertex coordinates. Furthermore, only one of the sets
* {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
* both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
* cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
* monolithic Pi (cycle_type < 10) require that Pi is not NULL.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   /* Store the user-provided interpolation matrices ... */
   ams -> Pi  = Pi;
   ams -> Pix = Pix;
   ams -> Piy = Piy;
   ams -> Piz = Piz;

   /* ... and remember that AMS must not destroy them later. */
   ams -> owns_Pi = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* alpha (the curl-curl term coefficient in the Maxwell problem).
*
* If this function is called, the coarse space solver on the range
* of Pi^T is a block-diagonal version of A_Pi. If this function is not
* called, the coarse space solver on the range of Pi^T is constructed
* as Pi^T A Pi in hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaPoissonMatrix
*
* Set the matrix corresponding to the Poisson problem with coefficient
* beta (the mass term coefficient in the Maxwell problem).
*
* This function call is optional - if not given, the Poisson matrix will
* be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
* that beta is 0 and use two-level (instead of three-level) methods.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   ams -> A_G = A_G;

   /* A NULL matrix means beta == 0: use two-level (not three-level) cycles. */
   if (!A_G)
   {
      ams -> beta_is_zero = 1;
   }
   else
   {
      /* Penalize the eliminated degrees of freedom */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);

      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetInteriorNodes
*
* Set the list of nodes which are interior to the zero-conductivity region.
* A node is interior if interior_nodes[i] == 1.0.
*
* Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
                                    hypre_ParVector *interior_nodes)
{
   /* Store the interior-node indicator vector; AMS does not take ownership. */
   ((hypre_AMSData *) solver) -> interior_nodes = interior_nodes;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetProjectionFrequency
*
* How often to project the r.h.s. onto the compatible sub-space Ker(G0^T),
* when iterating with the solver.
*
* The default value is every 5th iteration.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
                                          HYPRE_Int projection_frequency)
{
   /* Store how often the r.h.s. is projected onto Ker(G0^T). */
   ((hypre_AMSData *) solver) -> projection_frequency = projection_frequency;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetMaxIter
*
* Set the maximum number of iterations in the three-level method.
* The default value is 20. To use the AMS solver as a preconditioner,
* set maxit to 1, tol to 0.0 and print_level to 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetMaxIter(void *solver,
                              HYPRE_Int maxit)
{
   /* Store the iteration cap (use 1 when AMS acts as a preconditioner). */
   ((hypre_AMSData *) solver) -> maxit = maxit;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetTol
*
* Set the convergence tolerance (if the method is used as a solver).
* The default value is 1e-6.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetTol(void *solver,
                          HYPRE_Real tol)
{
   /* Store the convergence tolerance (0.0 when used as a preconditioner). */
   ((hypre_AMSData *) solver) -> tol = tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetCycleType
*
* Choose which three-level solver to use. Possible values are:
*
* 1 = 3-level multipl. solver (01210) <-- small solution time
* 2 = 3-level additive solver (0+1+2)
* 3 = 3-level multipl. solver (02120)
* 4 = 3-level additive solver (010+2)
* 5 = 3-level multipl. solver (0102010) <-- small solution time
* 6 = 3-level additive solver (1+020)
* 7 = 3-level multipl. solver (0201020) <-- small number of iterations
* 8 = 3-level additive solver (0(1+2)0) <-- small solution time
* 9 = 3-level multipl. solver (01210) with discrete divergence
* 11 = 5-level multipl. solver (013454310) <-- small solution time, memory
* 12 = 5-level additive solver (0+1+3+4+5)
* 13 = 5-level multipl. solver (034515430) <-- small solution time, memory
* 14 = 5-level additive solver (01(3+4+5)10)
* 20 = 2-level multipl. solver (0[12]0)
*
* 0 = a Hiptmair-like smoother (010)
*
* The default value is 1.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCycleType(void *solver,
                                HYPRE_Int cycle_type)
{
   /* Store the cycle selector; see the header comment for the legal values. */
   ((hypre_AMSData *) solver) -> cycle_type = cycle_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetPrintLevel
*
* Control how much information is printed during the solution iterations.
* The default value is 1 (print residual norm at each step).
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
                                 HYPRE_Int print_level)
{
   /* Store the output verbosity (0 = silent). */
   ((hypre_AMSData *) solver) -> print_level = print_level;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetSmoothingOptions
*
* Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
                                       HYPRE_Int A_relax_type,
                                       HYPRE_Int A_relax_times,
                                       HYPRE_Real A_relax_weight,
                                       HYPRE_Real A_omega)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   /* Store the relaxation parameters for the fine-level smoother on A. */
   ams -> A_relax_type   = A_relax_type;
   ams -> A_relax_times  = A_relax_times;
   ams -> A_relax_weight = A_relax_weight;
   ams -> A_omega        = A_omega;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetChebySmoothingOptions
* AB: note: this could be added to the above,
* but I didn't want to change parameter list)
* Set parameters for chebyshev smoother for A. Default values: 2,.3.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
HYPRE_Int A_cheby_order,
HYPRE_Int A_cheby_fraction)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_cheby_order = A_cheby_order;
ams_data -> A_cheby_fraction = A_cheby_fraction;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGOptions
*
* Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   /* Store the BoomerAMG options used when building B_Pi. */
   ams -> B_Pi_coarsen_type = B_Pi_coarsen_type;
   ams -> B_Pi_agg_levels   = B_Pi_agg_levels;
   ams -> B_Pi_relax_type   = B_Pi_relax_type;
   ams -> B_Pi_theta        = B_Pi_theta;
   ams -> B_Pi_interp_type  = B_Pi_interp_type;
   ams -> B_Pi_Pmax         = B_Pi_Pmax;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetAlphaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   /* Store the coarsest-level relaxation type for the B_Pi AMG solver. */
   ((hypre_AMSData *) solver) -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGOptions
*
* Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;

   /* Store the BoomerAMG options used when building B_G. */
   ams -> B_G_coarsen_type = B_G_coarsen_type;
   ams -> B_G_agg_levels   = B_G_agg_levels;
   ams -> B_G_relax_type   = B_G_relax_type;
   ams -> B_G_theta        = B_G_theta;
   ams -> B_G_interp_type  = B_G_interp_type;
   ams -> B_G_Pmax         = B_G_Pmax;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetBetaAMGCoarseRelaxType
*
* Set the AMG coarsest level relaxation for B_G. Default value: 8.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   /* Store the coarsest-level relaxation type for the B_G AMG solver. */
   ((hypre_AMSData *) solver) -> B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputePi
 *
 * Construct the Pi interpolation matrix, which maps the space of vector
 * linear finite elements to the space of edge finite elements.
 *
 * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
 * where each block has the same sparsity structure as G, and the entries
 * can be computed from the vectors Gx, Gy, Gz.
 *
 * Notes (review):
 * - The parameter A is not referenced in this routine; it is kept for
 *   interface uniformity with the related Compute* routines.
 * - Gz is read only when dim == 3; Gz_data stays uninitialized in 2D but
 *   every use is guarded by (dim == 3).
 * - The columns of the dim blocks are interleaved: global column
 *   dim*c + d holds component d (0 = x, 1 = y, 2 = z) of G-column c.
 * - Pi owns its data and its (freshly allocated) col_starts, but not
 *   row_starts, which are shared with G.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      /* Pi inherits the row distribution of G; every size that counts
         columns or nonzeros is scaled by dim because each G entry expands
         into dim interleaved entries. */
      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* With the assumed partition only the local [begin, end) pair is
         stored; otherwise the full num_procs+1 array is needed. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      /* Column partition of Pi = dim times the column partition of G. */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      /* row_starts belong to G; col_starts were allocated above and are
         handed over to Pi. */
      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
      hypre_ParCSRMatrixInitialize(Pi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

         /* Row pointers: each G nonzero expands to dim Pi nonzeros. */
         for (i = 0; i < G_diag_nrows+1; i++)
            Pi_diag_I[i] = dim * G_diag_I[i];

         /* Column indices: component d of G-column G_diag_J[i] lands in
            interleaved column dim*G_diag_J[i]+d. */
         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* Values: sequential writes follow the interleaved J pattern
            exactly — x, y (and z when dim == 3) per G nonzero. */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

         /* Row pointers are filled only when off-processor columns
            exist; the nnz-driven loops below are no-ops otherwise. */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               Pi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* Off-diagonal column map: local offd column i of G maps to dim
            interleaved global columns of Pi (BigInt arithmetic to avoid
            overflow of global indices). */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *
 * Notes (review):
 * - The parameter A is not referenced in this routine; it is kept for
 *   interface uniformity with the related Compute* routines.
 * - Unlike hypre_AMSComputePi, no interleaving is needed: each component
 *   copies G's I/J structure verbatim and only the values differ.
 * - Piz, Gz and *Piz_ptr are touched only when dim == 3; in 2D Piz stays
 *   uninitialized but is never read.
 * - The component matrices share G's row_starts and col_starts (ownership
 *   flags are 0), so G must outlive them.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

   /* Compute Pix, Piy, Piz */
   {
      HYPRE_Int i, j;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      /* Each component matrix has exactly the dimensions, partitioning
         and nonzero counts of G. */
      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      /* Starts arrays are borrowed from G, hence ownership is 0. */
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
      hypre_ParCSRMatrixInitialize(Pix);

      Piy = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Piy) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
      hypre_ParCSRMatrixInitialize(Piy);

      /* The z component exists only for 3D problems. */
      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

         /* All three components replicate G's CSR structure verbatim. */
         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
            Piz_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
            Piz_diag_J[i] = G_diag_J[i];
         }

         /* Values: sequential writes track the CSR traversal order. */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }
      else
      {
         /* 2D variant: same as above without the z component. */
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
         }

         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

         /* Row pointers are filled only when off-processor columns
            exist; the nnz-driven loops below are no-ops otherwise. */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
               Piz_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
            Piz_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* The off-diagonal column maps are copied unchanged from G. */
         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         /* 2D variant of the off-diagonal fill. */
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
            }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
   }

   *Pix_ptr = Pix;
   *Piy_ptr = Piy;
   if (dim == 3)
      *Piz_ptr = Piz;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
 *
 * Construct the matrix [G,Pi] which can be considered an interpolation
 * matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite elements space.
 *
 * Notes (review):
 * - The parameter A is not referenced in this routine; it is kept for
 *   interface uniformity with the related Compute* routines.
 * - dim is incremented on entry so that "dim" below counts the G block
 *   plus the Pi components (3 in 2D, 4 in 3D); Gz is read only when the
 *   incremented dim is 4.
 * - Column layout per G-column c: slot dim*c holds the G entry itself,
 *   followed by the x, y (and z) Pi entries.
 * - GPi owns its data and its (freshly allocated) col_starts, but not
 *   row_starts, which are shared with G.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *G,
                              hypre_ParVector *Gx,
                              hypre_ParVector *Gy,
                              hypre_ParVector *Gz,
                              HYPRE_Int dim,
                              hypre_ParCSRMatrix **GPi_ptr)
{
   hypre_ParCSRMatrix *GPi;

   /* Take into account G */
   dim++;

   /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      /* GPi inherits the row distribution of G; column/nonzero counts are
         scaled by dim because each G entry expands into dim entries. */
      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* With the assumed partition only the local [begin, end) pair is
         stored; otherwise the full num_procs+1 array is needed. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      /* Column partition of GPi = dim times the column partition of G. */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];

      GPi = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);

      /* row_starts belong to G; col_starts were allocated above and are
         handed over to GPi. */
      hypre_ParCSRMatrixOwnsData(GPi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;
      hypre_ParCSRMatrixInitialize(GPi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 4)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
         HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
         HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
         HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);

         /* Row pointers: each G nonzero expands to dim GPi nonzeros. */
         for (i = 0; i < G_diag_nrows+1; i++)
            GPi_diag_I[i] = dim * G_diag_I[i];

         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* Values: the raw G entry first, then the Pi components, in the
            same order as the interleaved J pattern above. */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *GPi_diag_data++ = G_diag_data[j];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 4)
                  *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
         HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
         HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
         HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);

         /* Row pointers are filled only when off-processor columns
            exist; the nnz-driven loops below are no-ops otherwise. */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               GPi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *GPi_offd_data++ = G_offd_data[j];
               *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 4)
                  *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* Off-diagonal column map: explicit HYPRE_BigInt casts, for
            consistency with hypre_AMSComputePi, make the intended BigInt
            arithmetic on global column indices unmistakable. */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               GPi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
      }
   }

   *GPi_ptr = GPi;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
* - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int input_info = 0;
ams_data -> A = A;
/* Modifications for problems with zero-conductivity regions */
if (ams_data -> interior_nodes)
{
hypre_ParCSRMatrix *G0t, *Aorig = A;
/* Make sure that multiple Setup()+Solve() give identical results */
ams_data -> solve_counter = 0;
/* Construct the discrete gradient matrix for the zero-conductivity region
by eliminating the zero-conductivity nodes from G^t. The range of G0
represents the kernel of A, i.e. the gradients of nodal basis functions
supported in zero-conductivity regions. */
hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
{
HYPRE_Int i, j;
HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
HYPRE_Real *interior_nodes_data=hypre_VectorData(
hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
for (i = 0; i < nv; i++)
{
if (interior_nodes_data[i] != 1)
{
for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
G0tdA[j] = 0.0;
if (G0toI)
for (j = G0toI[i]; j < G0toI[i+1]; j++)
G0toA[j] = 0.0;
}
}
}
hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);
/* Construct the subspace matrix A_G0 = G0^T G0 */
ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);
/* Create AMG solver for A_G0 */
HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
HYPRE_BoomerAMGSetup(ams_data -> B_G0,
(HYPRE_ParCSRMatrix)ams_data -> A_G0,
0, 0);
/* Construct the preconditioner for ams_data->A = A + G0 G0^T.
NOTE: this can be optimized significantly by taking into account that
the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */
{
hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
hypre_ParCSRMatrix *B = Aorig;
hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
hypre_ParCSRMatrix *C;
HYPRE_Real factor, lfactor;
/* scale (penalize) G0 G0^T before adding it to the matrix */
{
HYPRE_Int i;
HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B));
HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B));
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B));
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B));
lfactor = -1;
for (i = 0; i < B_diag_i[B_num_rows]; i++)
if (fabs(B_diag_data[i]) > lfactor)
lfactor = fabs(B_diag_data[i]);
for (i = 0; i < B_offd_i[B_num_rows]; i++)
if (fabs(B_offd_data[i]) > lfactor)
lfactor = fabs(B_offd_data[i]);
lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
}
hypre_ParcsrAdd(factor, A, 1.0, B, &C);
/*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);*/
/* scale (penalize) G0 G0^T before adding it to the matrix */
/*{
HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
HYPRE_Real *data = hypre_CSRMatrixData(A_local);
HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
HYPRE_Real factor, lfactor;
lfactor = -1;
for (i = 0; i < nnzB; i++)
if (fabs(dataB[i]) > lfactor)
lfactor = fabs(dataB[i]);
lfactor *= 1e-10;
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
for (i = 0; i < nnz; i++)
data[i] *= factor;
}
C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local);
C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0);
if (C_local)
hypre_CSRMatrixDestroy(C_tmp);
else
C_local = C_tmp;
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 1;
hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*/
hypre_ParCSRMatrixDestroy(A);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(G0t);
}
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */
/* Compute the l1 norm of the rows of A */
if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
{
HYPRE_Real *l1_norm_data = NULL;
hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data);
ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A));
hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data;
hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms, hypre_ParCSRMatrixMemoryLocation(ams_data -> A));
}
/* Chebyshev? */
if (ams_data -> A_relax_type == 16)
{
hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
&ams_data->A_max_eig_est,
&ams_data->A_min_eig_est);
}
/* If not given, compute Gx, Gy and Gz */
{
if (ams_data -> x != NULL && ams_data -> y != NULL &&
(ams_data -> dim == 2 || ams_data -> z != NULL))
input_info = 1;
if (ams_data -> Gx != NULL && ams_data -> Gy != NULL &&
(ams_data -> dim == 2 || ams_data -> Gz != NULL))
input_info = 2;
if (input_info == 1)
{
ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
if (ams_data -> dim == 3)
{
ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
}
}
}
if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
{
if (ams_data -> cycle_type == 20)
/* Construct the combined interpolation matrix [G,Pi] */
hypre_AMSComputeGPi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
else if (ams_data -> cycle_type > 10)
/* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
hypre_AMSComputePixyz(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pix,
&ams_data -> Piy,
&ams_data -> Piz);
else
/* Construct the Pi interpolation matrix */
hypre_AMSComputePi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
}
/* Keep Gx, Gy and Gz only if use the method with discrete divergence
stabilization (where we use them to compute the local mesh size). */
if (input_info == 1 && ams_data -> cycle_type != 9)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
/* Create the AMG solver on the range of G^T */
if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
{
HYPRE_BoomerAMGCreate(&ams_data -> B_G);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);
/* If not given, construct the coarse space matrix by RAP */
if (!ams_data -> A_G)
{
HYPRE_Int G_owned_col_starts;
if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
hypre_MatvecCommPkgCreate(ams_data -> G);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
ams_data -> A,
ams_data -> G,
&ams_data -> A_G);
/* Make sure that A_G has no zero rows (this can happen
if beta is zero in part of the domain). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;
ams_data -> owns_A_G = 1;
}
HYPRE_BoomerAMGSetup(ams_data -> B_G,
(HYPRE_ParCSRMatrix)ams_data -> A_G,
0, 0);
}
if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
/* Create the AMG solvers on the range of Pi{x,y,z}^T */
{
HYPRE_Int P_owned_col_starts;
HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);
/* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
{
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
}
/* Construct the coarse space matrices by RAP */
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
hypre_MatvecCommPkgCreate(ams_data -> Pix);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
ams_data -> A,
ams_data -> Pix,
&ams_data -> A_Pix);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
}
/* Make sure that A_Pix has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);
HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
(HYPRE_ParCSRMatrix)ams_data -> A_Pix,
0, 0);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
hypre_MatvecCommPkgCreate(ams_data -> Piy);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
ams_data -> A,
ams_data -> Piy,
&ams_data -> A_Piy);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
}
/* Make sure that A_Piy has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);
HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
(HYPRE_ParCSRMatrix)ams_data -> A_Piy,
0, 0);
if (ams_data -> Piz)
{
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
hypre_MatvecCommPkgCreate(ams_data -> Piz);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
ams_data -> A,
ams_data -> Piz,
&ams_data -> A_Piz);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
}
/* Make sure that A_Piz has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);
HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
(HYPRE_ParCSRMatrix)ams_data -> A_Piz,
0, 0);
}
}
else
/* Create the AMG solver on the range of Pi^T */
{
HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);
/* If not given, construct the coarse space matrix by RAP and
notify BoomerAMG that this is a dim x dim block system. */
if (!ams_data -> A_Pi)
{
HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
hypre_MatvecCommPkgCreate(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
if (ams_data -> cycle_type == 9)
{
/* Add a discrete divergence term to A before computing Pi^t A Pi */
{
hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;
/* scale GGt by h^2 */
{
HYPRE_Real h2;
HYPRE_Int i, j, k, ne;
hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);
hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);
HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));
for (i = 0; i < Gt_num_rows; i++)
{
/* determine the characteristic mesh size for vertex i */
h2 = 0.0;
ne = 0;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
{
k = Gt_diag_J[j];
h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
ne++;
}
if (ne != 0)
{
h2 /= ne;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
Gt_diag_data[j] *= h2;
for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
Gt_offd_data[j] *= h2;
}
}
}
/* we only needed Gx, Gy and Gz to compute the local mesh size */
if (input_info == 1)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
GGt = hypre_ParMatmul(ams_data -> G, Gt);
hypre_ParCSRMatrixDestroy(Gt);
/* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
hypre_ParcsrAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt);
/*{
hypre_ParCSRMatrix *A = GGt;
hypre_ParCSRMatrix *B = ams_data -> A;
hypre_ParCSRMatrix **C_ptr = &ApGGt;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
C_local = hypre_CSRMatrixBigAdd(A_local, B_local);
hypre_CSRMatrixBigJtoJ(C_local);
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*C_ptr = C;
}*/
hypre_ParCSRMatrixDestroy(GGt);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ApGGt,
ams_data -> Pi,
&ams_data -> A_Pi);
}
}
else
{
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ams_data -> A,
ams_data -> Pi,
&ams_data -> A_Pi);
}
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
}
ams_data -> owns_A_Pi = 1;
if (ams_data -> cycle_type != 20)
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
else
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
/* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
}
/* Make sure that A_Pi has no zero rows (this can happen for
some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
(HYPRE_ParCSRMatrix)ams_data -> A_Pi,
0, 0);
}
/* Allocate temporary vectors */
ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
if (ams_data -> A_G)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
}
if (ams_data -> r1 == NULL && ams_data -> A_Pix)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
}
if (ams_data -> Pi)
{
ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   HYPRE_Int i, my_id = -1;
   /* r_norm/r0_norm/b_norm are only computed (and only read) when maxit > 1 */
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
   /* The AMS cycle is encoded as a string interpreted by
      hypre_ParCSRSubspacePrec: '0' = fine-grid smoothing,
      '1'..'5' = correction in subspace 1..5, '(' = save the residual,
      '+' = make the next correction additive, ')' = no-op. */
   char cycle[30];
   /* Per-subspace data: coarse matrices Ai, interpolations Pi,
      AMG preconditioners Bi with their solve functions HBi, and
      temporary residual/correction vectors ri/gi. */
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   hypre_ParVector *z = NULL;
   /* Subspace 0: range of the discrete gradient G;
      subspace 1: range of the nodal interpolation Pi (dim x dim block system,
      hence the Block solve); subspaces 2-4: ranges of Pix, Piy, Piz. */
   Ai[0] = ams_data -> A_G; Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi; Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;
   Bi[0] = ams_data -> B_G; HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi; HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   /* Subspaces 0 and 2-4 share the r1/g1 temporaries; subspace 1 uses r2/g2 */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;
   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
   }
   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
   /* Compatible subspace projection for problems with zero-conductivity regions.
      Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }
   /* Select the cycle string; when beta == 0 the gradient subspace (1)
      drops out and only the fine grid and the Pi subspaces are visited. */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1:
         case 3:
         case 5:
         case 7:
         default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2:
         case 4:
         case 6:
         case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11:
         case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1:
         default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }
   /* Main iteration: apply the subspace preconditioner maxit times or
      until the relative residual drops below tol (residuals are only
      tracked when maxit > 1, i.e. when used as a standalone solver). */
   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf("                                            relative\n");
            hypre_printf("               residual        factor       residual\n");
            hypre_printf("               --------        ------       --------\n");
            hypre_printf("    Initial    %e                 %e\n",
                         r_norm, relative_resid);
         }
      }
      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle,
                               z);
      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf("    Cycle %2d   %e    %f     %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }
      if (relative_resid < ams_data -> tol)
      {
         i++;
         break;
      }
   }
   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));
   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;
   /* Flag non-convergence only when a tolerance was actually requested */
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);
   if (z)
      hypre_ParVectorDestroy(z);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
* to additive, based on residual computed at '('.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;
   for (op = cycle; *op != '\0'; op++)
   {
      /* ')' closes an additive group started by '(' -- nothing to do */
      if (*op == ')')
         continue;
      /* '(': save the residual r0 = x - A0 y for later additive corrections */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }
      /* '+': the next subspace correction reuses the saved residual
         (additive mode) instead of recomputing it from the current y */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }
      /* '0': fine-grid smoothing, y += S (x - A0 y) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }
      /* '1'..'5': subspace correction, y += P[i] B[i]^{-1} P[i]^t r */
      else
      {
         HYPRE_Int i = *op - '1';
         /* Reject any character outside '1'..'5' BEFORE it is used as an
            array index. The previous code flagged i < 0 but then read
            A[i] anyway (out-of-bounds access), and never checked the
            upper bound at all. The subspace arrays have 5 entries. */
         if (i < 0 || i >= 5)
         {
            hypre_error_in_arg(16);
            break;
         }
         /* skip empty subspaces */
         if (!A[i]) continue;
         /* compute the residual? */
         if (use_saved_residual)
         {
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }
         /* solve in the subspace with a zero initial guess ... */
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         /* ... and interpolate the correction back to the fine grid */
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   /* Report how many cycles the last hypre_AMSSolve() call performed. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   *num_iterations = data->num_iterations;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   /* Report the relative residual norm recorded by the last
      hypre_AMSSolve() call. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   *rel_resid_norm = data->rel_resid_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
* compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Nothing to project unless the interior-gradient solver B_G0 exists */
   if (!(ams_data -> B_G0))
   {
      return hypre_error_flag;
   }
   /* r1 = G0^t x */
   hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
   /* g1 ~= (G0^t G0)^{-1} r1, computed by AMG with zero initial guess */
   hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
   hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
   /* x -= G0 g1, i.e. x = (I - G0 (G0^t G0)^{-1} G0^t) x */
   hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
   hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
* - a matrix on the egdes (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
* and that the orientation of all edges is consistent. More specificaly:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_BigInt *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;
   /* Validate edge_orientation up front, before any allocation.  The
      previous code flagged hypre_error_in_arg(4) in the middle of the
      assembly but then kept going and returned a gradient matrix whose
      data array was all zeros (CTAlloc-initialized). */
   if (edge_orientation != 1 && edge_orientation != 2)
   {
      *G_ptr = NULL;
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   nedges = hypre_ParCSRMatrixNumRows(A);
   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
      HYPRE_Int part_size;
      HYPRE_BigInt *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);
      /* Every edge row has exactly two entries: its two vertices */
      for (i = 0; i <= nedges; i++)
         I[i] = 2*i;
      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i] = -1.0;
            data[i+1] = 1.0;
         }
      }
      else
      {
         /* edge_orientation == 2: orientation of edge i is determined by
            the ordering of its global vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i] = -1.0;
               data[i+1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i+1] = -1.0;
            }
         }
      }
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;
      /* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      part_size = 2;
#else
      hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
      part_size++;
#endif
      row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < part_size; i++)
      {
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }
      /* Generate the discrete gradient matrix: rows are partitioned like
         the edges of A, columns like the vertices of x_coord */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts, col_starts, 0, 0, 0);
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));
      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }
      /* Free the local matrix */
      hypre_CSRMatrixDestroy(local);
   }
   *G_ptr = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_BigInt *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_BigInt *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   HYPRE_Int i, j;
   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   /* Note: num_global_vert is a scalar; only vert_part is a pointer */
   HYPRE_BigInt *vert_part, num_global_vert;
   HYPRE_BigInt vert_start, vert_end;
   HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;
   /* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed partition: each rank stores only its own [start, end) pair */
   vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - big_local_vert;
   hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
#else
   /* Global partition: gather all ranks' counts and prefix-sum them */
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);
   vert_part = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&big_local_vert, 1, HYPRE_MPI_BIG_INT, &vert_part[1], 1, HYPRE_MPI_BIG_INT, comm);
   vert_part[0] = 0;
   for (i = 0; i < num_procs; i++)
      vert_part[i+1] += vert_part[i];
   num_global_vert = vert_part[num_procs];
#endif
   /* Construct hypre parallel vectors for the vertex coordinates.
      All three vectors share vert_part (OwnsPartitioning = 0); ownership
      of vert_part is transferred to G below via OwnsColStarts. */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));
   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));
   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));
   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);
   /* Save coordinates of locally owned vertices (vert_coord is packed
      as x,y,z triples per vertex) */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = (HYPRE_Int)(vert_number[i] - vert_start);
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }
   /* Change vertex numbers from local to global (modifies the caller's
      edge_vertex array in place) */
   for (i = 0; i < 2*num_edges; i++)
      edge_vertex[i] = vert_number[edge_vertex[i]];
   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);
      /* Two entries per edge row (the two endpoint vertices) */
      for (i = 0; i <= num_edges; i++)
         I[i] = 2*i;
      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i] = 1.0;
         data[i+1] = -1.0;
      }
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;
      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* G borrows A's row partitioning but takes ownership of vert_part */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G, vert_start, vert_end);
      //hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }
   /* Hand the constructed objects to the AMS data structure; they are
      released by hypre_AMSFEIDestroy() */
   ams_data -> G = G;
   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (ams_data -> G)
hypre_ParCSRMatrixDestroy(ams_data -> G);
if (ams_data -> x)
hypre_ParVectorDestroy(ams_data -> x);
if (ams_data -> y)
hypre_ParVectorDestroy(ams_data -> y);
if (ams_data -> z)
hypre_ParVectorDestroy(ams_data -> z);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
/* Compute per-row l1-type norms of a ParCSR matrix, partitioned across
 * num_threads row blocks.
 *
 * option: 1 = l1 norm of the full row (optionally restricted to entries whose
 *             cf_marker matches the row's marker),
 *         2 = diagonal + off-thread-block l1 norm,
 *         3 = sum of squares of the row,
 *         4 = like 2 but off-diagonal terms weighted by 1/2, truncated to the
 *             diagonal when the row is strongly diagonally dominant,
 *         5 = just the diagonal entry (for Jacobi via matvec), 0 replaced by 1.
 *
 * cf_marker may be NULL; when non-NULL, its values for off-processor columns
 * are fetched over the comm package first.  The result array is allocated
 * here and returned through l1_norm_ptr (caller frees).
 */
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int ii, ns, ne, rest, size;
   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      /* pack the local cf markers this rank must send */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      /* split [0, num_rows) into num_threads nearly equal blocks [ns, ne) */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            /* BUG FIX: diag must be reset per row; previously it kept the
             * value from an earlier row (or was read uninitialized) when a
             * row had no (CF-matching) diagonal entry, making the truncation
             * test below undefined. */
            diag = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }
      else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
      {
         /* Set the diag element */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = A_diag_data[A_diag_I[i]];
            if (l1_norm[i] == 0) l1_norm[i] = 1.0;
         }
      }

      if (option < 5)
      {
         /* Handle negative definite matrices */
         for (i = ns; i < ne; i++)
            if (A_diag_data[A_diag_I[i]] < 0)
               l1_norm[i] = -l1_norm[i];

         for (i = ns; i < ne; i++)
            /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
            if (fabs(l1_norm[i]) == 0.0)
            {
               /* a zero row scaling would divide by zero downstream */
               hypre_error_in_arg(1);
               break;
            }
      }
   }

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelaxThreads
* 1 = l1-scaled Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
*--------------------------------------------------------------------------*/
/* Threaded l1-smoothers for ParCSR matrices.
 *
 * relax_type: 1 = l1-scaled Jacobi; anything >= 2 is clamped to
 *             2 = hybrid symmetric Gauss-Seidel within each thread's row
 *                 block (Jacobi across blocks/processors).
 * l1_norms:   per-row scaling factors (see hypre_ParCSRComputeL1NormsThreads);
 *             used as the divisor in every update.
 * Vtemp:      workspace holding the previous iterate.
 * NOTE(review): relax_times and z are accepted but never read in this body —
 * a single sweep is always performed; confirm against callers.
 */
HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A,
                                   hypre_ParVector *f,
                                   HYPRE_Int relax_type,
                                   HYPRE_Int relax_times,
                                   HYPRE_Real *l1_norms,
                                   HYPRE_Real relax_weight,
                                   HYPRE_Real omega,
                                   hypre_ParVector *u,
                                   hypre_ParVector *Vtemp,
                                   hypre_ParVector *z)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   /* Received values of u for off-processor columns.
    * NOTE(review): left uninitialized when num_procs == 1 — safe only
    * because the off-diagonal part is empty in the serial case. */
   HYPRE_Real *Vext_data;
   HYPRE_Real *v_buf_data;   /* send buffer for the u exchange */
   HYPRE_Real *tmp_data;     /* snapshot of u used for off-thread-block couplings */
   HYPRE_Int i, j;
   HYPRE_Int ii, jj;
   HYPRE_Int ns, ne, size, rest;   /* this thread's row block is [ns, ne) */
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, num_threads, my_id;
   HYPRE_Real zero = 0.0;
   HYPRE_Real res, res2;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /* only allow jacobi and GS */
   if (relax_type > 2)
      relax_type = 2;

   /*-----------------------------------------------------------------
    * Exchange boundary values of u with neighboring processors so the
    * off-diagonal couplings can be applied locally.
    *-----------------------------------------------------------------*/
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      /* pack the local entries of u this rank must send */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data,
                                                 Vext_data);
      /* wait for the exchange to complete before relaxing */
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   if (relax_type == 1) /* Jacobi */
   {
      /* Snapshot u into Vtemp so every update uses the old iterate. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         Vtemp_data[i] = u_data[i];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if (A_diag_data[A_diag_i[i]] != zero)
         {
            /* residual r_i = f_i - (A u)_i using the old iterate */
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight*res)/l1_norms[i];
         }
      }
   }
   else if (relax_type == 2) /* GS */
   {
      if (relax_weight == 1 && omega == 1)
      {
         /* Unweighted symmetric GS within each thread block: tmp_data keeps
          * the pre-sweep u for couplings outside [ns, ne). */
         tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            /* split [0, n) into num_threads nearly equal blocks [ns, ne) */
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }

            for (i = ns; i < ne; i++)   /* forward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        /* in-block coupling: use the freshest value (GS) */
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* Weighted SSOR variant: res2 accumulates the omega correction
          * using the difference between the old (Vtemp) and new iterate. */
         HYPRE_Real c1 = omega*relax_weight;
         HYPRE_Real c2 = omega*(1.0-relax_weight);
         tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
         {
            tmp_data[i] = u_data[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            /* split [0, n) into num_threads nearly equal blocks [ns, ne) */
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  /* save the pre-update value for the backward sweep */
                  Vtemp_data[i] = u_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii < i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this block */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii > i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
      }
   } /* end of Jacobi or G.S. */

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   return(relax_error);
}
|
vgg_winograd.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <mkl.h>
#include <string.h>
#include <hbwmalloc.h>
#include <assert.h>
#include "falcon.h"
/* Reference (naive) convolution used to verify the Winograd kernel.
 *
 * D0: input,  N x C x (P+2) x (Q+2), already zero/halo padded by one pixel
 * F:  filter, K x C x R x S
 * O:  output, N x K x P x Q
 *
 * BUG FIXES vs. the previous version:
 *  - the inner spatial loop ran to P_pad-1 instead of Q_pad-1, which was only
 *    correct for square images;
 *  - p, q, c, r, s and sum were shared across the OpenMP threads of the k
 *    loop, a data race; they are now declared inside the parallel region.
 */
void direct_conv(float * D0, float * F, float * O, const int N, const int K, const int P, const int Q, const int C, const int R, const int S) {
    const int P_pad = P + 2;
    const int Q_pad = Q + 2;
    int n, k;

    for (n = 0; n < N; n++) {
#pragma omp parallel for
        for (k = 0; k < K; k++) {
            /* declared here so each thread gets its own copies */
            int p, q, c, r, s;
            for (p = 1; p < P_pad-1; p++) {
                for (q = 1; q < Q_pad-1; q++) {   /* was P_pad-1: wrong bound for non-square images */
                    float sum = 0;
#pragma unroll
                    for (c = 0; c < C; c++) {
#pragma unroll
                        for (r = 0; r < R; r++) {
#pragma unroll
                            for (s = 0; s < S; s++) {
                                sum += F[k*C*R*S + c*R*S + r*S + s]*D0[n*C*P_pad*Q_pad + c*P_pad*Q_pad + (p+r-1)*Q_pad + (q+s-1)];
                            }
                        }
                    }
                    O[n*K*P*Q+ k*P*Q+ (p-1)*Q+ (q-1)] = sum;
                }
            }
        }
    }
}
/* Benchmark (and optionally verify) one Winograd convolution layer.
 *
 * M      : merge factor passed through to fal_conv
 * irows  : padded input height/width; output is (irows-2)^2
 * mod    : inputs are initialized to i % mod
 * verify : nonzero -> compare against direct_conv instead of reporting GFLOPS
 * Accumulates flop count and timing into *total_flops / *total_time.
 *
 * Fixes vs. the previous version: hbw_posix_memalign results are actually
 * checked (ret was assigned but never tested; on failure the pointer is
 * indeterminate), size arithmetic is done in size_t to avoid int overflow,
 * and the verify printf used %d for a long index (undefined behavior).
 */
void winograd_conv(const int M, int irows, int C, int K, const int batch, long* total_flops, double* total_time, const int mod, const int verify){
    long i, n;
    const int outHeight = irows-2;
    const int outWidth = irows-2;
    const int sizeI = irows*irows;
    const int sizeF = 3*3;
    const int sizeO = outHeight*outWidth;
    int ret;

    float* image;
    //allocate data on MCDRAM
    ret = hbw_posix_memalign((void**)&image, 64, (size_t)batch*C*sizeI*sizeof(float));
    assert(ret == 0 && image != NULL);

    float* filter;
    //allocate data on MCDRAM
    ret = hbw_posix_memalign((void**)&filter, 64, (size_t)K*C*sizeF*sizeof(float));
    assert(ret == 0 && filter != NULL);

    float* out;
    //allocate data on MCDRAM
    ret = hbw_posix_memalign((void**)&out, 64, (size_t)batch*K*sizeO*sizeof(float));
    assert(ret == 0 && out != NULL);

    //initialize image in parallel
#pragma omp parallel for private(i)
    for(i = 0; i < batch*C*sizeI; i++)
        image[i] = (float)(i%mod);

    //initialize filter in parallel
#pragma omp parallel for private(i)
    for(i = 0; i < K*C*sizeF; i++)
        filter[i] = (float)(i%mod);

    double timer;
    double timer_acc = 0.0;
    // run for 5 iterations, discarding the first (warm-up) for average timing
    for(i = 0; i < 5; i++){
        if (i>0) timer= omp_get_wtime();
        fal_conv(M, image, irows, C, filter, K, batch, out);
        if(i>0) timer_acc += omp_get_wtime()-timer;
    }
    timer = timer_acc/4.0;

    /* 2 flops per multiply-accumulate over a 3x3 kernel */
    long nflops = (long)batch*K*C*(irows-2)*(irows-2)*3*3*2;
    double gflops = (double) nflops*1.0e-9/timer;
    *total_flops += nflops;
    *total_time += timer;

    if(verify){
        printf("Verifying WINOGRAD CONV I = %d Batch = %d C = %d K = %d \n", irows, batch, C, K);
        float* vout;
        //allocate data on MCDRAM
        ret = hbw_posix_memalign((void**)&vout, 64, (size_t)batch*K*sizeO*sizeof(float));
        assert(ret == 0 && vout != NULL);
        direct_conv(image, filter, vout, batch, K, outHeight, outWidth, C, 3, 3);
        for(n = 0; n < batch*sizeO*K; n++){
            if(out[n] != vout[n]){
                /* %ld: n is a long (was %d, undefined behavior) */
                printf("Output Error: out[%ld] = %f and vout[%ld] = %f \n", n, out[n], n, vout[n]);
                break;
            }
        }
        hbw_free(vout);
    }else
        printf("WINOGRAD CONV:\tEFFECTIVE GFLOPS is %.2f \tGFlops \tand timing is \t%f seconds \n", gflops, timer);

    hbw_free(image);
    hbw_free(filter);
    hbw_free(out);
}
/* Driver: run the 13 VGG-16 convolution layers through winograd_conv.
 * argv[1] = batch size, argv[2] = verify flag (0 = benchmark, 1 = check
 * against the direct convolution with a reduced batch of 8).
 * BUG FIX: the previous version checked argc < 2 but read argv[2],
 * dereferencing past the argument array when only one argument was given. */
int main(int argc, char** argv){
    printf("\n *****WINOGRAD CONVOLUTION******\n\n");
    if(argc < 3){
        printf("Enter batch_size and verify flag (0/1)\n");
        exit(-1);
    }
    int batch = atoi(argv[1]);
    int verify = atoi(argv[2]);

    /* Per-layer input sizes (padded), channels, filters and merge factors. */
    const int I_array[13] = {226, 226, 114, 114, 58, 58, 58, 30, 30, 30, 16, 16, 16};
    const int C_array[13] = {3, 64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512};
    const int K_array[13] = {64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512};
    const int merge_array[13] = {1, 1, 4, 4, 8, 8, 8, 16, 16, 16, 16, 16, 16};
    int t;
    double total_time = 0.0;
    long total_flops = 0;

    falcon_init_lib();
    if (verify) printf("Verifying with Reduced batch size of 8, since direct conv takes long time...\n\n\n\n");
    for(t = 0; t < 13; t++){
        int irows = I_array[t];
        int C = C_array[t];
        int K = K_array[t];
        if(verify)
            winograd_conv(1, irows, C, K, 8, &total_flops, &total_time, 50, verify);
        else
            winograd_conv(merge_array[t], irows, C, K, batch, &total_flops, &total_time, 50, verify);
    }
    falcon_free_lib();

    printf("\n\n");
    if(!verify){
        printf("WINOGRAD: OVERALL EFFECTIVE GFLOPS is %.2f GFLops and Timing is %.4f seconds \n", (double)total_flops*1.0e-9/total_time, total_time);
    }
    printf("\n ******************************\n\n");
    printf("\n\n");
    return 0;
}
|
MatrixMXN.h | #pragma once
#include "VectorND.h"
#include <cassert>   // assert() used in get1DIndex/initialize
#include <climits>   // INT_MAX used in initialize
#include <fstream>
#include <iostream>  // std::cout used in cout()
/* Dense row-major m x n matrix with heap-allocated storage.
 * Storage is owned by the object; initialize() may be called repeatedly
 * and frees any previous allocation. */
template<class T>
class MatrixMN
{
public:
    int num_rows_;   // number of rows (m)
    int num_cols_;   // number of columns (n)
    T *values_;      // row-major storage of size num_rows_ * num_cols_

    // Empty matrix.
    MatrixMN()
        : values_(nullptr), num_rows_(0), num_cols_(0)
    {};

    // Sized matrix, zero-filled.
    // BUG FIX: previously ignored _m/_n entirely and left the matrix empty.
    MatrixMN(const int& _m, const int& _n)
        : values_(nullptr), num_rows_(0), num_cols_(0)
    {
        initialize(_m, _n);
    }

    // (Re)allocate as _m x _n and zero-fill. Frees any previous storage.
    void initialize(const int& _m, const int& _n)
    {
        num_rows_ = _m;
        num_cols_ = _n;

        // BUG FIX: storage comes from new[], so it must be released with
        // delete[]; plain delete on a new[] pointer is undefined behavior.
        delete [] values_;
        values_ = nullptr;

        // guard against int overflow of the element count
        assert((double)num_rows_ * (double)num_cols_ <= (double)INT_MAX);
        const int num_all = num_rows_ * num_cols_;

        values_ = new T[num_all];
        for (int i = 0; i < num_all; i++)
            values_[i] = (T)0;
    }

    // Row-major linear index of (row, column).
    int get1DIndex(const int& row, const int& column) const
    {
        assert(row >= 0);
        assert(column >= 0);
        // BUG FIX: bounds were off by one (<=) and the second test compared
        // row against num_cols_ instead of column.
        assert(row < num_rows_);
        assert(column < num_cols_);

        // column = i, row = j
        return column + row * num_cols_;
    }

    // Reference to element (row, column); no bounds check beyond get1DIndex.
    T& getValue(const int& row, const int& column) const
    {
        return values_[get1DIndex(row, column)];
    }

    // Print the matrix to stdout, one row per line.
    void cout()
    {
        for (int row = 0; row < num_rows_; row++)
        {
            for (int col = 0; col < num_cols_; col++)
            {
                std::cout << getValue(row, col) << " ";
            }
            std::cout << std::endl;
        }
    }

    // result = (*this) * vector; result must already have num_rows_ entries.
    void multiply(const VectorND<T>& vector, VectorND<T>& result) const
    {
#pragma omp parallel for
        for (int row = 0; row < num_rows_; row++)
        {
            result.values_[row] = (T)0;

            int ix = row*num_cols_;
            T temp;
            for (int col = 0; col < num_cols_; col++, ix++)
            {
                temp = values_[ix];
                temp *= vector.values_[col];
                result.values_[row] += temp;
            }
        }
    }
};
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store x - y in *result, normalizing so result->tv_usec is positive.
 * NOTE: *y is modified as scratch space during the normalization.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec  += borrow;
    }
    /* Carry any excess microseconds in the gap back into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }

    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the tiled order-2-in-time, 25-point (order-8 in
 * space) 3D wave-equation stencil.  Usage: prog Nx Ny Nz Nt
 * Fixes vs. the previous version:
 *  - missing arguments no longer leave Nx..Nt uninitialized (UB);
 *  - the initial 1-element roc2 allocation (immediately overwritten, leaked)
 *    is gone;
 *  - initialization covers index 0 and the A[1] plane, both of which the
 *    stencil reads, so no indeterminate values are consumed;
 *  - tile_size is freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+8;  /* +8: four halo layers on each side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  /* Two ping-pong time planes plus the coefficient field. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize the full grid: the stencil reads index 0 (interior minus the
   * 4-wide halo) and the A[1] plane on the first sweep, so both must be
   * defined. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Finite-difference coefficients: center plus 4 shells per axis. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code (auto-generated diamond tiling; do not hand-edit) */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(t1-2,3)),ceild(2*t1-2*t2-1,3)),ceild(16*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(8*t1+Ny+7,24)),floord(16*t2+Ny+3,24)),floord(16*t1-16*t2+Nz+Ny+5,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(24*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(24*t3+Nx+11,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),6*t3+4),32*t4+30);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                    lbv=max(128*t4,4*t5+4);
                    ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
diagsm_x_bsr_n_col.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
/* Triangular-solve-like diagonal scaling for a BSR matrix:
 * for each of the `columns` right-hand sides stored column-major in x
 * (leading dimension ldx), computes y = alpha * x / diag(A) elementwise,
 * where diag(A) is the scalar diagonal of A extracted from its diagonal
 * blocks.  y uses leading dimension ldy.
 * NOTE(review): rows with no diagonal block keep diag == 0 from the memset,
 * so alpha_div divides by zero for them — confirm callers guarantee a full
 * diagonal. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT num_thread = alpha_get_thread_num();
    const ALPHA_INT bs = A->block_size;
    /* scalar diagonal of A, one entry per (block row, intra-block row) */
    ALPHA_Number* diag=(ALPHA_Number*) alpha_malloc(A->rows*bs*sizeof(ALPHA_Number));
    const ALPHA_INT m = A->rows*bs;
    const ALPHA_INT n = A->cols*bs;
    // assert(m==n);
    memset(diag, '\0', m * sizeof(ALPHA_Number));
    const ALPHA_INT b_rows = m / bs;
    const ALPHA_INT b_cols = n / bs;   /* currently unused; kept for symmetry */

    /* Extract the diagonal: for each block row r find the diagonal block
     * (column index == r) and copy its diagonal entries. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT r = 0 ; r < b_rows; r++){
        for(ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++){
            ALPHA_INT ac = A->col_indx[ai];
            if(ac == r){
                for(ALPHA_INT b_row = 0 ; b_row < bs ; b_row++){
                    /* b_row*(bs+1) walks the diagonal of the bs x bs block */
                    diag[index2(r,b_row,bs)] = A->values[ai * bs * bs + b_row *(bs + 1)];
                }
            }
        }
    }

    /* y[c][r] = alpha * x[c][r] / diag[r], parallel over right-hand sides */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT c = 0; c < columns; ++c)
    {
        for (ALPHA_INT r = 0; r < A->rows * bs; ++r)
        {
            ALPHA_Number t;
            alpha_mul(t, alpha, x[index2(c, r, ldx)]);
            alpha_div(y[index2(c, r, ldy)], t, diag[r]);
        }
    }
    alpha_free(diag);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
ejercicio1.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* OpenMP exercise: initialize a[i] = i+1, then add i to each element in a
 * parallel for, and print the result.
 * BUG FIXES: implicit-int `main()` is invalid in C99+; and `private(n)` gave
 * each thread an UNINITIALIZED copy of the loop bound n (undefined
 * behavior) — it must be firstprivate (or shared) so threads see n == 7. */
int main(void){
    int i, n = 7;
    int a[n];

    for (i=0; i<n; i++)
        a[i] = i+1;

#pragma omp parallel for default(none) shared(a) firstprivate(n)
    for (i=0; i<n; i++) a[i] += i;

    printf("Después de parallel for:\n");
    for (i=0; i<n; i++)
        printf("a[%d] = %d\n",i,a[i]);
    return 0;
}
pmv-OpenMP-a.c | #include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#include <time.h> // time_t y time() para la semilla de srand()
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_set_dynamic(0);
#define omp_set_num_threads(4);
#define omp_get_wtime() 0.0
#endif
/* Matrix-vector product v2 = M * v1 timed with OpenMP.
 * argv[1] = N, the dimension of the square matrix and the vectors.
 * Fixes vs. the previous version:
 *  - the allocation of M itself was never checked;
 *  - rand() was called inside `omp parallel for` regions — rand() keeps
 *    hidden global state and is not thread-safe (data race), and exit()
 *    inside a parallel region is non-conforming; initialization and
 *    allocation loops are now serial (only the timed product is parallel);
 *  - printf format specifiers now match the argument types (%d for int,
 *    %zu for size_t) — a mismatch is undefined behavior. */
int main(int argc, char ** argv){
    int **M;
    int *v1, *v2;
    int i, k, N;
    double cgt1, cgt2, ncgt; //para tiempo de ejecución
    time_t t;

    // Semilla de rand()
    srand((unsigned) time(&t));

    // Obtenemos el numero de filas x columnas de la matriz cuadrada
    if(argc < 2){
        fprintf(stderr,"Falta iteraciones\n");
        exit(-1);
    }
    N = atoi(argv[1]);

    // == Reserva de Memoria
    // ====================================================>
    v1 = (int *) malloc(N*sizeof(int));
    v2 = (int *) malloc(N*sizeof(int));
    if ( v1 == NULL || v2 == NULL ){
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }
    M = (int**) malloc (N*sizeof(int*));
    if ( M == NULL ){
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }
    /* Serial: exit() dentro de una región paralela no está permitido. */
    for(i = 0; i<N; i++){
        M[i] = (int*) malloc (N*sizeof(int));
        if( M[i] == NULL ){
            printf("Error en la reserva de espacio para los vectores\n");
            exit(-2);
        }
    }

    // == Inicializacion
    // ====================================================>
    /* Serial: rand() no es seguro entre hebras (estado global oculto). */
    for(i = 0; i<N; i++){
        for(k = 0; k<N; k++)
            M[i][k] = rand() % 8;
    }
    for(i = 0; i<N; i++){
        v1[i] = rand() % 6;
        v2[i] = 0;
    }

    // == Calculo
    // ====================================================>
    cgt1 = omp_get_wtime();
    /* Cada hebra calcula un rango de filas i; k es privada por hebra. */
#pragma omp parallel for shared(v1,v2,M,N) private(i,k) default(none)
    for(i = 0; i<N; i++){
        for(k = 0; k<N; k++)
            v2[i] += M[i][k] * v1[k];
    }
    cgt2 = omp_get_wtime();
    ncgt = (double)(cgt2 - cgt1);

    // == Imprimir Mensajes
    // ====================================================>
    printf("Tiempo(seg.):%11.9f\n", ncgt);
    printf("Tamaño de los vectores: %d\n", N);
    printf("\tv1 = %dElem -> %zu bytes\n\tv2 = %dElem -> %zu bytes\n", N, N*sizeof(int), N, N*sizeof(int));
    printf("Tamaño de la matriz: %dx%d -> %zu bytes\n", N, N, N*N*sizeof(int));
    // Imprimir el primer y último componente del resultado evita que las optimizaciones del compilador
    // eliminen el código de la suma.
    printf("v2[0] = %d ... v2[N-1] = %d \n", v2[0], v2[N-1]);
    // Para tamaños pequeños de N < 15 mostrar los valores calculados
    if(N < 15){
        printf("\n----------- Matriz M ----------- \n");
        for(i = 0; i<N; i++){
            for(k = 0; k<N; k++)
                printf("%d\t", M[i][k]);
            printf("\n");
        }
        printf("\n----------- Vector V1 ----------- \n");
        for(i = 0; i<N; i++)
            printf("%d\t", v1[i]);
        printf("\n");
        printf("\n----------- Vector V2----------- \n");
        for(i = 0; i<N; i++)
            printf("%d\t", v2[i]);
        printf("\n");
    }

    // == Liberar Memoria
    // ====================================================>
    free(v1);
    free(v2);
    for(i = 0; i<N; i++)
        free(M[i]);
    free(M);
    return 0;
}
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/*
  A threshold (dither) map loaded from thresholds.xml: a width x height
  matrix of integer levels plus a normalizing divisor.
*/
struct _ThresholdMap
{
  char
    *map_id,       /* map name from the XML "map" attribute */
    *description;  /* human-readable text from <description> */

  size_t
    width,         /* columns in the level matrix */
    height;        /* rows in the level matrix */

  ssize_t
    divisor,       /* levels are interpreted relative to this divisor --
                      TODO confirm exact scaling against the dither code */
    *levels;       /* width*height level values (layout set by the parser) */
};
/*
Static declarations.
*/
/*
  Built-in fallback threshold maps (XML).  GetThresholdMap() consults this
  string before any thresholds.xml on disk, so the basic "threshold" and
  "checks" maps are always available even with no configuration files.
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    " <threshold map=\"threshold\" alias=\"1x1\">"
    " <description>Threshold 1x1 (non-dither)</description>"
    " <levels width=\"1\" height=\"1\" divisor=\"2\">"
    " 1"
    " </levels>"
    " </threshold>"
    " <threshold map=\"checks\" alias=\"2x1\">"
    " <description>Checkerboard 2x1 (dither)</description>"
    " <levels width=\"2\" height=\"2\" divisor=\"3\">"
    " 1 2"
    " 2 1"
    " </levels>"
    " </threshold>"
    "</thresholds>";
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image.  Each row is processed independently; a failing row
    records MagickFalse in status and the remaining rows are skipped.
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;  /* pixels per local window */
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],
      channel_sum[MaxPixelChannels];

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the window rows centered on row y; the virtual cache view pads
      the image edges.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Offset of the window's center pixel within the neighborhood buffer.
    */
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
      GetPixelChannels(image)*(width/2);
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
          (threshold_traits == UndefinedPixelTrait))
        continue;
      if (((threshold_traits & CopyPixelTrait) != 0) ||
          (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
        {
          /*
            Copy-through channels and masked pixels pass unmodified.
          */
          SetPixelChannel(threshold_image,channel,p[center+i],q);
          continue;
        }
      /*
        Prime the running window sum for x == 0.  channel_bias accumulates
        the window's last column, presumably so the slide step in the x loop
        can replace it -- NOTE(review): confirm this incremental scheme.
      */
      pixels=p;
      channel_bias[channel]=0.0;
      channel_sum[channel]=0.0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          if (u == (ssize_t) (width-1))
            channel_bias[channel]+=pixels[i];
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image);
        }
        pixels+=GetPixelChannels(image)*image->columns;
      }
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          mean;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (threshold_traits == UndefinedPixelTrait))
          continue;
        if (((threshold_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(threshold_image,channel,p[center+i],q);
            continue;
          }
        /*
          Slide the window one column right: drop the cached column, then
          accumulate the current window's first column (cached for the next
          step) and add its last column.
        */
        channel_sum[channel]-=channel_bias[channel];
        channel_bias[channel]=0.0;
        pixels=p;
        for (v=0; v < (ssize_t) height; v++)
        {
          channel_bias[channel]+=pixels[i];
          pixels+=(width-1)*GetPixelChannels(image);
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image)*(image->columns+1);
        }
        /*
          Local window mean (plus the caller's bias) decides black vs white.
        */
        mean=(double) (channel_sum[channel]/number_pixels+bias);
        SetPixelChannel(threshold_image,channel,(Quantum) ((double)
          p[center+i] <= mean ? 0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically selects a threshold and replaces each
% pixel in the image with a black pixel if the image intentsity is less than
% the selected threshold otherwise white.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity 255

  double
    *cdf,
    entropy,
    epsilon,
    *high_entropy,
    *low_entropy,
    peak_entropy;

  register ssize_t
    bin,
    candidate;

  size_t
    threshold;

  /*
    Kapur's entropy method: choose the histogram bin that maximizes the sum
    of the entropies of the two classes it induces (bins at or below the
    candidate versus bins above it).  Returns the threshold as a percent of
    the intensity range, or -1.0 on allocation failure.
  */
  cdf=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*cdf));
  low_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*low_entropy));
  high_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*high_entropy));
  if ((cdf == (double *) NULL) || (low_entropy == (double *) NULL) ||
      (high_entropy == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before reporting failure.
      */
      if (high_entropy != (double *) NULL)
        high_entropy=(double *) RelinquishMagickMemory(high_entropy);
      if (low_entropy != (double *) NULL)
        low_entropy=(double *) RelinquishMagickMemory(low_entropy);
      if (cdf != (double *) NULL)
        cdf=(double *) RelinquishMagickMemory(cdf);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Cumulative distribution of the (normalized) histogram.
  */
  cdf[0]=histogram[0];
  for (bin=1; bin <= MaxIntensity; bin++)
    cdf[bin]=cdf[bin-1]+histogram[bin];
  epsilon=MagickMinimumValue;
  for (candidate=0; candidate <= MaxIntensity; candidate++)
  {
    /*
      Entropy of the "black" class: bins 0..candidate.
    */
    low_entropy[candidate]=0.0;
    if (cdf[candidate] > epsilon)
      {
        entropy=0.0;
        for (bin=0; bin <= candidate; bin++)
          if (histogram[bin] > epsilon)
            entropy-=histogram[bin]/cdf[candidate]*
              log(histogram[bin]/cdf[candidate]);
        low_entropy[candidate]=entropy;
      }
    /*
      Entropy of the "white" class: bins candidate+1..MaxIntensity.
    */
    high_entropy[candidate]=0.0;
    if ((1.0-cdf[candidate]) > epsilon)
      {
        entropy=0.0;
        for (bin=candidate+1; bin <= MaxIntensity; bin++)
          if (histogram[bin] > epsilon)
            entropy-=histogram[bin]/(1.0-cdf[candidate])*
              log(histogram[bin]/(1.0-cdf[candidate]));
        high_entropy[candidate]=entropy;
      }
  }
  /*
    The threshold is the bin with maximum combined entropy.
  */
  peak_entropy=low_entropy[0]+high_entropy[0];
  threshold=0;
  for (candidate=1; candidate <= MaxIntensity; candidate++)
    if ((low_entropy[candidate]+high_entropy[candidate]) > peak_entropy)
      {
        peak_entropy=low_entropy[candidate]+high_entropy[candidate];
        threshold=(size_t) candidate;
      }
  cdf=(double *) RelinquishMagickMemory(cdf);
  low_entropy=(double *) RelinquishMagickMemory(low_entropy);
  high_entropy=(double *) RelinquishMagickMemory(high_entropy);
  return(100.0*threshold/MaxIntensity);
}
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    *density,
    *first_moment,
    peak_variance,
    threshold,
    *variance,
    *zeroth_moment;

  register ssize_t
    i;

  /*
    Otsu's method: pick the threshold that maximizes the between-class
    variance of the two pixel populations it separates.  Returns the
    threshold as a percent of the intensity range, or -1.0 on allocation
    failure.
  */
  first_moment=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*first_moment));
  zeroth_moment=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*zeroth_moment));
  density=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*density));
  variance=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*variance));
  if ((first_moment == (double *) NULL) || (zeroth_moment == (double *) NULL) ||
      (density == (double *) NULL) || (variance == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before reporting failure.
      */
      if (variance != (double *) NULL)
        variance=(double *) RelinquishMagickMemory(variance);
      if (density != (double *) NULL)
        density=(double *) RelinquishMagickMemory(density);
      if (zeroth_moment != (double *) NULL)
        zeroth_moment=(double *) RelinquishMagickMemory(zeroth_moment);
      if (first_moment != (double *) NULL)
        first_moment=(double *) RelinquishMagickMemory(first_moment);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Copy the histogram into the working probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    density[i]=histogram[i];
  /*
    Accumulate class probability (zeroth moment) and class mean numerator
    (first moment) for every candidate split point.
  */
  zeroth_moment[0]=density[0];
  first_moment[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    zeroth_moment[i]=zeroth_moment[i-1]+density[i];
    first_moment[i]=first_moment[i-1]+i*density[i];
  }
  /*
    Maximize the inter-class variance over all candidate thresholds.
  */
  threshold=0;
  peak_variance=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    variance[i]=0.0;
    if ((zeroth_moment[i] != 0.0) && (zeroth_moment[i] != 1.0))
      variance[i]=pow(first_moment[MaxIntensity]*zeroth_moment[i]-
        first_moment[i],2.0)/(zeroth_moment[i]*(1.0-zeroth_moment[i]));
    if (variance[i] > peak_variance)
      {
        peak_variance=variance[i];
        threshold=(double) i;
      }
  }
  first_moment=(double *) RelinquishMagickMemory(first_moment);
  zeroth_moment=(double *) RelinquishMagickMemory(zeroth_moment);
  density=(double *) RelinquishMagickMemory(density);
  variance=(double *) RelinquishMagickMemory(variance);
  return(100.0*threshold/MaxIntensity);
}
static double TriangleThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Triangle algorithm (Zack, Rogers & Latt): draw a line from the histogram
    peak to the far tail of the distribution; the threshold is the bin whose
    histogram point lies farthest from that line.  Returns the threshold as
    a percent of the intensity range.  The image and exception parameters
    are unused but kept for signature parity with the other choosers.
  */
  start=0;  /* first bin with a non-zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* last bin with a non-zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* bin with the largest count (the peak) */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Line through (max,histogram[max]) and (x2,0), where x2 is whichever tail
    is farther from the peak; expressed as a*x+b*y+c=0.
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  /*
    Point-to-line distance is |a*x+b*y+c|/sqrt(a*a+b*b).  The previous
    normalization erroneously included c*c; since that only rescaled every
    distance by the same constant, the selected bin is unchanged by this
    correction.
  */
  inverse_ratio=1.0/sqrt(a*a+b*b);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=fabs(segment);  /* exact |segment|; replaces sqrt(x*x) */
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=fabs(segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Bin each pixel by its 8-bit intensity.
      */
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram so the bins form a probability distribution.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);  /* reciprocal that tolerates sum == 0 */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram; OTSU is the default method.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;  /* the chooser reported an allocation failure */
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Record the chosen threshold (a percent) as an image property, then apply
    it by bileveling the image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that give is set to it maximum or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Gray images are promoted to sRGB before thresholding.
  */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          /*
            Masked pixels pass through unmodified.
          */
          q+=GetPixelChannels(image);
          continue;
        }
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Default channel mask: every channel is thresholded against the
          pixel intensity.  Explicit mask: each selected channel is
          thresholded against its own value.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold string: rho[,sigma[,xi[,psi]]] map to red, green,
    blue, alpha; a single value applies to all color channels.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /*
        CMYK: psi selects black and chi selects alpha instead.
      */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /*
        Percent syntax: scale the thresholds to the quantum range.
      */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image: force channels below the threshold to black.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          /*
            Masked pixels pass through unmodified.
          */
          q+=GetPixelChannels(image);
          continue;
        }
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Default mask compares intensity; explicit channel mask compares
          each channel's own value.  Channels at or above the threshold are
          left unchanged.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() set each pixel whose value is below zero to zero and any the
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        Palette image: clamp the colormap entries, then regenerate the
        pixels from the colormap.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          /*
            Masked pixels pass through unmodified.
          */
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImage)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocate the given ThresholdMap
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *DestroyThresholdMap(Threshold *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release a ThresholdMap and every buffer it owns; always returns NULL so
    the caller can overwrite its pointer in one step.
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Search the compiled-in maps first so the basic maps always resolve.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    /*
      Otherwise scan every thresholds.xml on the configure path, stopping at
      the first file containing the requested map or alias.
    */
    options=GetConfigureOptions(ThresholdsFilename,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    while (option != (const StringInfo *) NULL)
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
      option=(const StringInfo *) GetNextValueInLinkedList(options);
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() look for a given threshold map name or alias in the
% given XML file data, and return the allocated the map when found.
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  /*
    Parse the XML threshold-map list, locate the <threshold> element whose
    "map" or "alias" attribute matches map_id, and return an allocated
    ThresholdMap built from it.  Returns NULL (with an exception recorded for
    malformed entries) when the map is absent or invalid.
  */
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Find the matching <threshold> element by "map" or "alias" attribute.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map and populate id/description; the level table is filled
    in below once its dimensions are validated.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Pass the element count (width*height) as the count argument so
    AcquireQuantumMemory() can detect count*quantum overflow itself; the
    previous form multiplied height*sizeof(*map->levels) before the call,
    which could overflow unchecked for hostile XML dimensions.
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width*
    map->height,sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  /*
    Parse exactly width*height integer levels; each must lie in
    [0, divisor].
  */
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    A further parseable number means the content held more values than
    width*height allows; reject the map.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMapFile method is:
%
%   MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
%     const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%   o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  /*
    Write one table row per <threshold> element of the given XML threshold
    list: map name, optional alias, and description.  Returns MagickFalse
    when the XML is unparsable or an entry lacks a name or description.
  */
  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    /* The alias attribute is optional; print an empty column when absent. */
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%   o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    List the threshold maps defined by every configured thresholds XML file,
    writing to stdout when no FILE is supplied.  The result is MagickTrue
    only when every file listed successfully.
  */
  if (file == (FILE *) NULL)
    file=stdout;
  status=MagickTrue;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    /* Accumulate per-file success; a single failure clears the status. */
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() performs an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%   o threshold_map: A string containing the name of the threshold dither
%     map to use, followed by zero or more numbers representing the number
%     of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
%   For example: "o3x3,6" will generate a 6 level posterization of the
%   image with an ordered 3x3 diffused pixel dither being applied between
%   each level.  While checker,8,8,4 will produce a 332 colormapped image
%   with only a single checkerboard hash pattern (50% grey) between each
%   color level, to basically double the number of color levels with
%   a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  /*
    Apply an ordered dither using the named threshold map, optionally
    posterizing each channel to the comma-separated level counts that may
    follow the map name in threshold_map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  /*
    Extract the map name: skip leading whitespace/commas, then copy up to
    the next whitespace, comma, or MagickPathExtent-1 characters.
  */
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
         (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
         (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse the optional per-channel level counts; a single number applies to
    all channels, further numbers override channels in sequence.
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  /* Convert level counts to interval counts (N levels -> N-1 intervals). */
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* A zero level count disables dithering for this channel. */
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;
            continue;
          }
        /*
          Split the scaled value into a posterization level plus a
          remainder that is compared against the tiled threshold map.
        */
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OrderedDitherImage)
#endif
        proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  /*
    Bug fix: previously this returned MagickTrue unconditionally, hiding
    pixel-cache and sync failures recorded in status.
  */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Clamp every channel value whose magnitude is below epsilon to
    +/- epsilon (see PerceptibleThreshold()); larger values pass through
    unchanged.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        PseudoClass image: thresholding the colormap entries suffices;
        SyncImage() pushes the updated colormap back into the pixels.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* A failure in another row aborts remaining iterations cheaply. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImage)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
%   MagickBooleanType RandomThresholdImage(Image *image,
%     const double min_threshold,const double max_threshold,
%     ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o min_threshold, max_threshold: Specify the low and high thresholds.
%     These values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Threshold each updatable channel against a per-pixel random value
    clamped to [min_threshold, max_threshold]: values at or below the
    threshold become 0, values above become QuantumRange.

    Note: the unused "PixelInfo threshold" local and its GetPixelInfo()
    initialization were removed — the per-channel loop declares its own
    double threshold, so the outer one was dead code.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Clamp the effective threshold into [min_threshold, max_threshold];
          only values between the two bounds are randomly thresholded.
        */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Force every pixel whose (intensity or per-channel) value exceeds the
    parsed threshold to white (QuantumRange); pixels at or below the
    threshold are left unchanged.  The thresholds string is parsed as
    geometry: rho[,sigma[,xi[,psi]]] mapping to red,green,blue,alpha
    (black/alpha for CMYK), with '%' scaling by QuantumRange/100.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): gray images are promoted to sRGB so the RGB threshold
     components apply — confirm this matches the intended gray handling. */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  /* rho seeds every component; later flags override individual channels. */
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  /* For CMYK the 4th value is black and the 5th (chi) is alpha. */
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* With a restricted channel mask, compare each channel's own
           value instead of the overall pixel intensity. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
FFTAbsLogRealImageFilter.h | /*
* MIT License
*
* Copyright (c) 2018-2019 Benjamin Köhler
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#ifndef BK_FFTABSLOGREALIMAGEFILTER_H
#define BK_FFTABSLOGREALIMAGEFILTER_H
#include <cmath>
#include <complex>
#include <bkTypeTraits/complex_traits.h>
namespace bk
{
  class FFTAbsLogRealImageFilter
  {
      //====================================================================================================
      //===== DEFINITIONS
      //====================================================================================================
      using self_type = FFTAbsLogRealImageFilter;

      //====================================================================================================
      //===== CONSTRUCTORS & DESTRUCTOR
      //====================================================================================================
    public:
      /// @{ -------------------------------------------------- CTOR
      FFTAbsLogRealImageFilter() = default;
      FFTAbsLogRealImageFilter(const self_type&) = default;
      FFTAbsLogRealImageFilter(self_type&&) noexcept = default;
      /// @}

      /// @{ -------------------------------------------------- DTOR
      ~FFTAbsLogRealImageFilter() = default;
      /// @}

      //====================================================================================================
      //===== SETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- OPERATOR =
      [[maybe_unused]] auto operator=(const self_type& other) -> self_type& = default;
      [[maybe_unused]] auto operator=(self_type&& other) noexcept -> self_type& = default;
      /// @}

      //====================================================================================================
      //===== FUNCTIONS
      //====================================================================================================
      /// @{ -------------------------------------------------- APPLY
      //! Map each value of img to log(|v|) (log(|Re(v)|) for complex images),
      //! returning a double-valued image of the same size.
      template<typename TImage>
      [[nodiscard]] static typename TImage::template self_template_type<double> apply(const TImage& img)
      {
          using value_type = typename TImage::value_type;

          typename TImage::template self_template_type<double> res;
          res.set_size(img.size());

          #pragma omp parallel for
          for (unsigned int i = 0; i < img.num_values(); ++i)
          {
              /* Bug fix: this must be "if constexpr". With a plain runtime "if"
                 both branches are instantiated for every value_type, so
                 img[i].real() fails to compile whenever value_type is not a
                 complex type (e.g. double). is_complex_v is a compile-time
                 constant, so constexpr dispatch is the correct form. */
              if constexpr (bk::is_complex_v<value_type>)
              { res[i] = std::log(std::abs(img[i].real())); }
              else
              { res[i] = std::log(std::abs(img[i])); }
          }

          return res;
      }
      /// @}
  }; // class FFTAbsLogRealImageFilter
} // namespace bk

#endif //BK_FFTABSLOGREALIMAGEFILTER_H
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval' values.
 *
 * `y' is normalized in place so the microsecond difference lands in
 * [0, 1000000); callers must not rely on `y' afterwards.
 *
 * Returns 1 when the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  long borrow;

  /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
  if (x->tv_usec < y->tv_usec) {
    borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += borrow;
    y->tv_usec -= 1000000 * borrow;
  }

  /* Carry excess microseconds into y's seconds in the opposite case. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    borrow = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= borrow;
    y->tv_usec += 1000000 * borrow;
  }

  /* The usec difference is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
    /* Driver for a tiled (Pluto/CLooG-generated) order-4 axis-symmetric 3-D
     * stencil benchmark: A[t+1] = weighted sum of 13 coef terms over A[t].
     * Usage: prog <Nx> <Ny> <Nz> <Nt>; 8 ghost cells are added per axis. */
    int i, j, k, m, test;
    int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
    /* BUG FIX: the original read Nx/Ny/Nz uninitialized when argc <= 3 and
     * Nt uninitialized when argc <= 4, then used them as loop/allocation
     * bounds (undefined behavior). Require all four arguments instead. */
    if (argc < 5) {
        fprintf(stderr, "usage: %s <Nx> <Ny> <Nz> <Nt>\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1]) + 8;
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
    Nt = atoi(argv[4]);
    // allocate the arrays; the innermost rows of A use calloc because the
    // stencil reads up to 4 halo cells on each side (and all of A[1] on the
    // first sweep) that the init loops below -- which start at index 1 --
    // never write. With malloc those reads touched indeterminate values.
    // NOTE(review): allocation results are unchecked, as in the original
    // benchmark code.
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double*) calloc(Nx, sizeof(double));
            }
        }
    }
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for (m = 0; m < 13; m++) {
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 8;
    tile_size[3] = 64;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;
    // initialize variables: fixed seed for reproducibility; interior points
    // only (indices >= 1) -- the calloc'd halo cells of A remain zero.
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m = 0; m < 13; m++) {
        for (i = 1; i < Nz; i++) {
            for (j = 1; j < Ny; j++) {
                for (k = 1; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
           This file is part of the GNU C Library.
           The GNU C Library is free software; you can redistribute it and/or
           modify it under the terms of the GNU Lesser General Public
           License as published by the Free Software Foundation; either
           version 2.1 of the License, or (at your option) any later version.
           The GNU C Library is distributed in the hope that it will be useful,
           but WITHOUT ANY WARRANTY; without even the implied warranty of
           MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
           Lesser General Public License for more details.
           You should have received a copy of the GNU Lesser General Public
           License along with the GNU C Library; if not, see
           <http://www.gnu.org/licenses/>. */
        /* (The glibc header comment above is residue inlined by the
           polyhedral source-to-source tool; kept verbatim.) */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lbp, ubp;
        register int lbv, ubv;
        /* Start of CLooG code */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=Nt-1;t1++) {
                lbp=ceild(t1+1,2);
                ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) {
                        for (t4=max(max(ceild(t1-14,16),ceild(8*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(floord(4*Nt+Nx-9,64),floord(4*t1+Nx-1,64)),floord(8*t3+Nx-5,64));t4++) {
                            for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),16*t4+14);t5++) {
                                for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                                        lbv=max(64*t4,4*t5+4);
                                        ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays. BUG FIX: the original leaked the top-level
    // A and coef pointer arrays and the tile_size buffer.
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
GB_binop__ne_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int8)
// A*D function (colscale): GB (_AxD__ne_int8)
// D*A function (rowscale): GB (_DxB__ne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int8)
// C=scalar+B GB (_bind1st__ne_int8)
// C=scalar+B' GB (_bind1st_tran__ne_int8)
// C=A+scalar GB (_bind2nd__ne_int8)
// C=A'+scalar GB (_bind2nd_tran__ne_int8)
// C type: bool
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C += A+B (dense ewise3 with accumulation) is intentionally compiled out
// for this operator: per the note below, only monoid-like ops qualify as
// accumulators, and NE is not in that list.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = (aij != bij).
// The actual loop lives in the included template, specialized by the
// GB_* macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The numerical body is disabled (#if 0) for this operator; the function
// still exists so the generated dispatch table links, and it reports
// success without doing work.
GrB_Info GB (_Cdense_accumB__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// As with _Cdense_accumB above, the numerical body is disabled (#if 0)
// for this operator; the stub returns success.
GrB_Info GB (_Cdense_accumb__ne_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D,
// with cij = (aij != djj). Work happens in the included template.
GrB_Info GB (_AxD__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has already been allocated by the caller; expose its values array
// for the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D,
// with cij = (dii != bij). Work happens in the included template.
GrB_Info GB (_DxB__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has already been allocated by the caller; expose its values array
// for the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B with op NE_INT8.
// The heavy lifting is in GB_add_template.c, specialized by the macros
// defined at the top of this file.
GrB_Info GB (_AaddB__ne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here so the template's GB_FREE_WORKSPACE sees it
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
// alpha/beta are only unpacked for eWiseUnion; presumably consumed by
// GB_add_template.c for entries present in only one input -- confirm there
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the
// result C is sparse or hypersparse; cij = (aij != bij).
GrB_Info GB (_AemultB_08__ne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. flipxy swaps the operands; since NE is commutative
// (GB_BINOP_FLIP is 0 here), the unflipped template branch is compiled.
GrB_Info GB (_AemultB_02__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where the mask M is sparse/hyper
// and both A and B are bitmap/full; cij = (aij != bij).
GrB_Info GB (_AemultB_04__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is
// held as a bitmap; cij = (aij != bij).
GrB_Info GB (_AemultB_bitmap__ne_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand and apply the
// NE operator entry-wise: Cx [p] = (x != Bx [p]) for every entry present
// in the optional bitmap Bb.
GrB_Info GB (_bind1st__ne_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unpack the untyped inputs (no restrict on the output: it may alias Bx)
bool *Cz = (bool *) Cx_output ;
const int8_t xval = (*((const int8_t *) x_input)) ;
const int8_t *Bvals = (const int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (GBB (Bb, p))
{
// entry present: apply the bound operator
Cz [p] = (xval != GBX (Bvals, p, false)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand and apply the
// NE operator entry-wise: Cx [p] = (Ax [p] != y) for every entry present
// in the optional bitmap Ab.
GrB_Info GB (_bind2nd__ne_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unpack the untyped inputs (no restrict on the output: it may alias Ax)
bool *Cz = (bool *) Cx_output ;
const int8_t *Avals = (const int8_t *) Ax_input ;
const int8_t yval = (*((const int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Ab, p))
{
// entry present: apply the bound operator
Cz [p] = (GBX (Avals, p, false) != yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x
// bound as the first operand, via GB_unop_transpose.c and the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB (_bind1st_tran__ne_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y
// bound as the second operand, via GB_unop_transpose.c and the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB (_bind2nd_tran__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_bool_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_int32
// op(A') function: GB_tran__abs_bool_int32
// C type: bool
// A type: int32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply ABS with an int32 -> bool cast to every entry.
// For this type pair the unary op is cij = aij (see the macros above), so
// each output is simply the bool cast of the input; the GB_CAST_OP macro
// is expanded inline here for clarity.
GrB_Info GB_unop__abs_bool_int32
(
bool *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// cast then apply the (identity) op: Cx [p] = (bool) Ax [p]
Cx [p] = (bool) Ax [p] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int32 -> bool, and apply the
// unary operator, all via the included GB_unaryop_transpose.c template
// (numerical phase 2 of 2).
GrB_Info GB_tran__abs_bool_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.h | /**
* @file matrix.h This code provide a templated matrix implementation
* @author TPOC: contact@palisade-crypto.org
*
* @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_MATRIX_H
#define LBCRYPTO_MATH_MATRIX_H
#include <iostream>
#include <functional>
#include <cmath>
#include <stdexcept>
#include "../math/backend.h"
#include "../lattice/backend.h"
#include "../math/nbtheory.h"
#include "../math/distrgen.h"
#include "../encoding/encodings.h"
#include "../utils/inttypes.h"
#include "../utils/utilities.h"
#include "../utils/memory.h"
using std::invalid_argument;
namespace lbcrypto {
template<class Element>
class Matrix : public Serializable {
public:
typedef vector<vector<Element>> data_t;
typedef vector<Element> data_row_t;
typedef std::function<Element(void)> alloc_func;
/**
* Constructor that initializes matrix values using a zero allocator
*
* @param &allocZero lambda function for zero initialization.
* @param &rows number of rows.
* @param &rows number of columns.
*/
Matrix(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
    // Build each row by appending freshly allocated zero elements, one
    // call to allocZero() per cell.
    data.resize(rows);
    for (auto &oneRow : data) {
        oneRow.reserve(cols);
        for (size_t c = 0; c < cols; ++c) {
            oneRow.push_back(allocZero());
        }
    }
}
//TODO: add Clear();
/**
* Constructor that initializes matrix values using a distribution generation allocator
*
* @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects)
* @param &rows number of rows.
* @param &rows number of columns.
* @param &allocGen lambda function for initialization using a distribution generator.
*/
Matrix(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);
/**
* Constructor of an empty matrix.
* SetSize must be called on this matrix to use it
* SetAlloc needs to be called if 0 passed to constructor
* This mostly exists to support deserializing
*
* @param &allocZero lambda function for zero initialization.
*/
Matrix(alloc_func allocZero = 0) : data(), rows(0), cols(0), allocZero(allocZero) {}
/**
* Set the size of a matrix, elements are zeroed out
*
* @param rows number of rows
* @param cols number of colums
*/
void SetSize(size_t rows, size_t cols) {
    // Only an empty (default-constructed) matrix may be resized; the
    // allocator must have been set beforehand (see SetAllocator).
    if (this->rows != 0 || this->cols != 0)
        throw std::logic_error("You cannot SetSize on a non-empty matrix");
    this->rows = rows;
    this->cols = cols;
    data.resize(rows);
    for (auto &oneRow : data) {
        oneRow.reserve(cols);
        for (size_t c = 0; c < cols; ++c) {
            oneRow.push_back(allocZero());
        }
    }
}
/**
* SetAllocator - set the function to allocate a zero;
* basically only required for deserializer
*
* @param allocZero
*/
void SetAllocator(alloc_func allocZero) {
this->allocZero = allocZero; // replace the zero-element factory; required before SetSize on a deserialized matrix
}
/**
* Copy constructor
*
* @param &other the matrix object to be copied
*/
Matrix(const Matrix<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
deepCopyData(other.data); // deep copy of every element (helper defined elsewhere in this class)
}
/**
* Assignment operator
*
* @param &other the matrix object whose values are to be copied
* @return the resulting matrix
*/
Matrix<Element>& operator=(const Matrix<Element>& other);
/**
* In-place change of the current matrix to a matrix of all ones
*
* @return the resulting matrix
*/
Matrix<Element>& Ones();
/**
* Get the size of a vector in data
*/
size_t sizeofMatrixVector(std::vector<Element> matrixrow){
size_t result = 0;
for (size_t i = 0; i < matrixrow.size(); i++){
result += sizeof(matrixrow[i]);
}
return result;
}
// Shallow byte count of the matrix: sizeof of the outer vector object
// plus the per-row element bytes from sizeofMatrixVector(). Heap memory
// owned by the Elements themselves is not included.
size_t SizeOf(){
size_t result = sizeof(this->data);
for (size_t row = 0; row < rows; row++){
result += sizeofMatrixVector(data[row]);
}
return result;
}
// Macro for convenient definitions of class implementations of special functions
#define ONES_FOR_TYPE(T) \
template<> \
Matrix<T>& Matrix<T>::Ones() { \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
data[row][col] = 1; \
} \
} \
return *this; \
}
/**
* In-place modulo reduction
*
* @return the resulting matrix
*/
Matrix<Element>& ModEq(const Element &modulus);
/**
* modular subtraction
*
* @return the resulting matrix
*/
Matrix<Element>& ModSubEq(Matrix<Element> const& b, const Element &modulus);
/**
* Fill matrix using the same element
*
* @param &val the element the matrix is filled by
*
* @return the resulting matrix
*/
Matrix<Element>& Fill(const Element &val);
/**
* In-place change of the current matrix to Identity matrix
*
* @return the resulting matrix
*/
Matrix<Element>& Identity();
#define IDENTITY_FOR_TYPE(T) \
template<> \
Matrix<T>& Matrix<T>::Identity() { \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
if (row == col) { \
data[row][col] = 1; \
} else { \
data[row][col] = 0; \
} \
} \
} \
return *this; \
}
/**
* Sets the first row to be powers of two for when the base is two
*
* @param base is the base the digits of the matrix are represented in
* @return the resulting matrix
*/
Matrix<Element> GadgetVector(int64_t base = 2) const;
#define GADGET_FOR_TYPE(T) \
template<> \
Matrix<T> Matrix<T>::GadgetVector(int64_t base) const { \
Matrix<T> g(allocZero, rows, cols); \
auto base_matrix = allocZero(); \
size_t k = cols/rows; \
base_matrix = base; \
g(0, 0) = 1; \
for (size_t i = 1; i < k; i++) { \
g(0, i) = g(0, i-1) * base_matrix; \
} \
for (size_t row = 1; row < rows; row++) { \
for (size_t i = 0; i < k; i++) { \
g(row, i + row*k) = g(0, i); \
} \
} \
return g; \
}
#define GADGET_FOR_TYPE_DCRT(T) \
template<> \
Matrix<T> Matrix<T>::GadgetVector(int64_t base) const \
{ \
Matrix<T> g(allocZero, rows, cols); \
auto base_matrix = allocZero(); \
base_matrix = base; \
size_t bk = 1; \
\
auto params = g(0,0).GetParams()->GetParams(); \
\
uint64_t digitCount = (long)ceil(log2(params[0]->GetModulus().ConvertToDouble())/log2(base)); \
\
for (size_t k = 0; k < digitCount; k++) { \
for (size_t i = 0; i < params.size(); i++) { \
NativePoly temp(params[i]); \
temp = bk; \
g(0,k+i*digitCount).SetElementAtIndex(i,temp); \
} \
bk *= base; \
} \
\
size_t kCols = cols/rows; \
for (size_t row = 1; row < rows; row++) { \
for (size_t i = 0; i < kCols; i++) { \
g(row, i + row*kCols) = g(0, i); \
} \
} \
return g; \
}
/**
* Computes the infinity norm
*
* @return the norm in double format
*/
double Norm() const;
#define NORM_FOR_TYPE(T) \
template<> \
double Matrix<T>::Norm() const { \
double retVal = 0.0; \
double locVal = 0.0; \
for (size_t row = 0; row < rows; ++row) { \
for (size_t col = 0; col < cols; ++col) { \
locVal = data[row][col].Norm(); \
if (locVal > retVal) { \
retVal = locVal; \
} \
} \
} \
return retVal; \
}
/**
* Matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
Matrix<Element> Mult(Matrix<Element> const& other) const;
/**
* Operator for matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
Matrix<Element> operator*(Matrix<Element> const& other) const {
return Mult(other); // thin delegate to Mult()
}
/**
* Multiplication of matrix by a scalar
*
* @param &other the multiplier element
* @return the result of multiplication
*/
Matrix<Element> ScalarMult(Element const& other) const {
    // Copy the matrix, then scale every entry in place by `other`.
    // Columns are distributed across OpenMP threads, as in Add/Sub.
    Matrix<Element> scaled(*this);
#pragma omp parallel for
    for (size_t c = 0; c < scaled.cols; ++c) {
        for (size_t r = 0; r < scaled.rows; ++r) {
            scaled.data[r][c] = scaled.data[r][c] * other;
        }
    }
    return scaled;
}
/**
* Operator for scalar multiplication
*
* @param &other the multiplier element
* @return the result of multiplication
*/
Matrix<Element> operator*(Element const& other) const {
return ScalarMult(other);
}
/**
* Equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool Equal(Matrix<Element> const& other) const {
    // Matrices of different shape are never equal.
    if (rows != other.rows || cols != other.cols) {
        return false;
    }
    // Element-wise comparison; stop at the first difference.
    for (size_t r = 0; r < rows; ++r) {
        for (size_t c = 0; c < cols; ++c) {
            if (data[r][c] != other.data[r][c]) {
                return false;
            }
        }
    }
    return true;
}
/**
* Operator for equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool operator==(Matrix<Element> const& other) const {
return Equal(other); // thin delegate to Equal()
}
/**
* Operator for non-equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
bool operator!=(Matrix<Element> const& other) const {
return !Equal(other); // negation of Equal()
}
/**
 * Read-only access to the underlying storage (vector of row vectors).
 * The returned reference stays valid only while this matrix is alive
 * and unmodified.
 *
 * @return const reference to the data as vector of vectors
 */
const data_t& GetData() const {
return data;
}
/**
 * Number of rows in the matrix.
 *
 * @return the number of rows
 */
size_t GetRows() const {
return rows;
}
/**
 * Number of columns in the matrix.
 *
 * @return the number of columns
 */
size_t GetCols() const {
return cols;
}
/**
 * Access to the zero-element allocator used to construct entries.
 *
 * @return the lambda function corresponding to the element zero allocator
 */
alloc_func GetAllocator() const {
return allocZero;
}
/**
 * Sets the evaluation or coefficient representation for all ring elements
 * that support the SetFormat method. (Declaration only; defined out of line.)
 *
 * @param &format the enum value corresponding to coefficient or evaluation representation
 */
void SetFormat(Format format);
/**
 * Matrix addition.
 *
 * Fix: the loops now iterate rows in the outer loop so the row-major
 * storage (vector of row vectors) is traversed contiguously; the previous
 * column-outer order touched every row vector per column, which is
 * cache-hostile. The result is identical (each entry is updated exactly
 * once, independently).
 *
 * @param &other the matrix to be added
 * @return the resulting matrix
 * @throws invalid_argument if the dimensions do not match
 */
Matrix<Element> Add(Matrix<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
        throw invalid_argument("Addition operands have incompatible dimensions");
    }
    Matrix<Element> result(*this);
#pragma omp parallel for
    for (size_t i = 0; i < rows; ++i) {
        for (size_t j = 0; j < cols; ++j) {
            result.data[i][j] += other.data[i][j];
        }
    }
    return result;
}
/**
 * Addition operator; forwards to Add().
 *
 * @param &other the matrix to be added
 * @return the resulting matrix
 */
Matrix<Element> operator+(Matrix<Element> const& other) const {
    return Add(other);
}
/**
 * In-place addition. (Declaration only; defined out of line.)
 *
 * @param &other the matrix to be added
 * @return reference to *this after the addition
 */
Matrix<Element>& operator+=(Matrix<Element> const& other);
/**
 * Matrix subtraction.
 *
 * Fix: loops interchanged (rows outer) so the row-major storage is
 * traversed contiguously, mirroring Add(); the previous column-outer
 * order was cache-hostile. The result is identical.
 *
 * @param &other the matrix to be subtracted
 * @return the resulting matrix
 * @throws invalid_argument if the dimensions do not match
 */
Matrix<Element> Sub(Matrix<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
        throw invalid_argument("Subtraction operands have incompatible dimensions");
    }
    Matrix<Element> result(allocZero, rows, other.cols);
#pragma omp parallel for
    for (size_t i = 0; i < rows; ++i) {
        for (size_t j = 0; j < cols; ++j) {
            result.data[i][j] = data[i][j] - other.data[i][j];
        }
    }
    return result;
}
/**
 * Subtraction operator; forwards to Sub().
 *
 * @param &other the matrix to be subtracted
 * @return the resulting matrix
 */
Matrix<Element> operator-(Matrix<Element> const& other) const {
    return Sub(other);
}
/**
 * In-place matrix subtraction. (Declaration only; defined out of line.)
 *
 * @param &other the matrix to be subtracted
 * @return reference to *this after the subtraction
 */
Matrix<Element>& operator-=(Matrix<Element> const& other);
/**
 * Matrix transposition.
 *
 * @return the resulting transposed matrix
 */
Matrix<Element> Transpose() const;
// YSP The signature of this method needs to be changed in the future
/**
 * Matrix determinant - found using Laplace formula with complexity O(d!), where d is the dimension
 *
 * @param *result where the result is stored
 */
void Determinant(Element *result) const;
//Element Determinant() const;
/**
 * Cofactor matrix - the matrix of determinants of the minors A_{ij} multiplied by -1^{i+j}
 *
 * @return the cofactor matrix for the given matrix
 */
Matrix<Element> CofactorMatrix() const;
/**
 * Add rows to the bottom of the matrix (vertical stacking, in place).
 *
 * @param &other the matrix to be added to the bottom of current matrix
 * @return reference to the modified matrix
 */
Matrix<Element>& VStack(Matrix<Element> const& other);
/**
 * Add columns to the right of the matrix (horizontal stacking, in place).
 *
 * @param &other the matrix to be added to the right of current matrix
 * @return reference to the modified matrix
 */
Matrix<Element>& HStack(Matrix<Element> const& other);
/**
 * Matrix indexing operator - writable reference to the element.
 * No bounds checking: out-of-range indices are undefined behavior.
 *
 * @param row row index
 * @param col column index
 * @return mutable reference to the element at (row, col)
 */
Element& operator()(size_t row, size_t col) {
return data[row][col];
}
/**
 * Matrix indexing operator - read-only reference to the element.
 * No bounds checking: out-of-range indices are undefined behavior.
 *
 * @param row row index
 * @param col column index
 * @return const reference to the element at (row, col)
 */
Element const& operator()(size_t row, size_t col) const {
return data[row][col];
}
/**
 * Extracts a single row as a 1 x cols matrix.
 *
 * Fixes: replaces the signed `int` counter driven alongside an iterator
 * with a single size_t index, and removes the dead commented-out
 * `return *this;`. As before, an out-of-range `row` is not checked.
 *
 * @param row row index
 * @return a new 1 x cols matrix containing a copy of the row
 */
Matrix<Element> ExtractRow(size_t row) const {
    Matrix<Element> result(this->allocZero, 1, this->cols);
    for (size_t j = 0; j < this->cols; ++j) {
        result(0, j) = data[row][j];
    }
    return result;
}
/**
 * Extracts a single column as a rows x 1 matrix.
 *
 * No bounds checking on `col`, as in the rest of the indexing API.
 *
 * @param col column index
 * @return a new rows x 1 matrix containing a copy of the column
 */
Matrix<Element> ExtractCol(size_t col) const {
    Matrix<Element> column(this->allocZero, this->rows, 1);
    for (size_t r = 0; r < this->rows; ++r)
        column(r, 0) = data[r][col];
    return column;
}
/**
 * Extracts the rows in the inclusive range [row_start, row_end]
 * as a (row_end - row_start + 1) x cols matrix.
 *
 * Fixes: replaces the non-standard `usint` loop type and the signed
 * `int` column counter with size_t, and copies by direct indexing
 * instead of an iterator paired with a manual counter. Indices are not
 * range-checked, as before; callers must ensure
 * row_start <= row_end < rows.
 *
 * @param row_start first row index (inclusive)
 * @param row_end last row index (inclusive)
 * @return the extracted rows as a new matrix
 */
inline Matrix<Element> ExtractRows(size_t row_start, size_t row_end) const {
    Matrix<Element> result(this->allocZero, row_end - row_start + 1, this->cols);
    for (size_t row = row_start; row <= row_end; ++row) {
        for (size_t j = 0; j < this->cols; ++j) {
            result(row - row_start, j) = data[row][j];
        }
    }
    return result;
}
/**
 * Stream output: prints the matrix as nested bracketed rows,
 * e.g. "[ [ a b ]\n[ c d ]\n ]\n".
 */
friend std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m) {
    os << "[ ";
    for (size_t r = 0; r < m.GetRows(); ++r) {
        os << "[ ";
        for (size_t c = 0; c < m.GetCols(); ++c)
            os << m(r, c) << " ";
        os << "]\n";
    }
    os << " ]\n";
    return os;
}
/**
 * Call SwitchFormat (coefficient <-> evaluation) on each (ring) element.
 * (Declaration only; defined out of line.)
 */
void SwitchFormat();
// Helper macro: defines a SwitchFormat() specialization that throws for
// matrix element types that are not ring elements.
#define NOT_AN_ELEMENT_MATRIX(T) \
template<> \
void Matrix<T>::SwitchFormat() { \
PALISADE_THROW(not_available_error, "Not a matrix of Elements"); \
}
/*
 * Multiply the matrix by a vector whose elements are all 1's. This causes the elements of each
 * row of the matrix to be added and placed into the corresponding position in the output vector.
 */
Matrix<Element> MultByUnityVector() const;
/*
 * Multiply the matrix by a vector of random 1's and 0's, which is the same as adding select
 * elements in each row together.
 * Return a vector that is a rows x 1 matrix.
 */
Matrix<Element> MultByRandomVector(std::vector<int> ranvec) const;
// cereal serialization: writes data, rows, cols under short NVP keys.
template <class Archive>
void save( Archive & ar, std::uint32_t const version ) const
{
ar( ::cereal::make_nvp("d", data) );
ar( ::cereal::make_nvp("r", rows) );
ar( ::cereal::make_nvp("c", cols) );
}
// cereal deserialization: rejects objects serialized by a newer library
// version, then restores data, rows, cols. The allocator is NOT
// serialized (see note below).
template <class Archive>
void load( Archive & ar, std::uint32_t const version )
{
if( version > SerializedVersion() ) {
PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library");
}
ar( ::cereal::make_nvp("d", data) );
ar( ::cereal::make_nvp("r", rows) );
ar( ::cereal::make_nvp("c", cols) );
// users will need to SetAllocator for any newly deserialized matrix
}
// Name and version used by the serialization framework.
std::string SerializedObjectName() const { return "Matrix"; }
static uint32_t SerializedVersion() { return 1; }
private:
data_t data;       // row-major storage: vector of row vectors
size_t rows;       // number of rows
size_t cols;       // number of columns
alloc_func allocZero;  // factory producing zero-valued elements
//mutable int NUM_THREADS = 1;
// Deep-copy helper used by the copy constructor: clones every element of
// src into this matrix's storage, row by row.
void deepCopyData(data_t const& src) {
    data.clear();
    data.resize(src.size());
    for (size_t row = 0; row < src.size(); ++row) {
        for (const auto& elem : src[row]) {
            data[row].push_back(elem);
        }
    }
}
};
/**
 * Free-function scalar multiplication with the scalar on the left:
 * e * M. Forwards to Matrix::ScalarMult.
 *
 * @param &e the scalar
 * @param &M the matrix
 * @return the resulting matrix
 */
template<class Element>
Matrix<Element> operator*(Element const& e, Matrix<Element> const& M) {
    return M.ScalarMult(e);
}
/**
 * Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
template<typename Element>
Matrix<typename Element::Integer> Rotate(Matrix<Element> const& inMat);
/**
 * Each element becomes a square matrix with columns of that element's
 * rotations in coefficient form. See pages 7-8 of https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
template<typename Element>
Matrix<typename Element::Vector> RotateVecResult(Matrix<Element> const& inMat);
/**
 * Stream output operator
 *
 * @param &os stream
 * @param &m matrix to be outputted
 * @return the chained stream
 */
template<class Element>
std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m);
/**
 * Gives the Cholesky decomposition of the input matrix.
 * The assumption is that covariance matrix does not have large coefficients because it is formed by
 * discrete gaussians e and s; this implies int32_t can be used
 * This algorithm can be further improved - see the Darmstadt paper section 4.4
 * http://eprint.iacr.org/2013/297.pdf
 *
 * @param &input the matrix for which the Cholesky decomposition is to be computed
 * @return the resulting matrix of floating-point numbers
 */
Matrix<double> Cholesky(const Matrix<int32_t> &input);
void Cholesky(const Matrix<int32_t> &input, Matrix<double> &result);
/**
 * Convert a matrix of integers from BigInteger to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
Matrix<int32_t> ConvertToInt32(const Matrix<BigInteger> &input, const BigInteger& modulus);
/**
 * Convert a matrix of BigVector to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
Matrix<int32_t> ConvertToInt32(const Matrix<BigVector> &input, const BigInteger& modulus);
/**
 * Split a vector of int64_t into a vector of ring elements with ring dimension n
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template<typename Element>
Matrix<Element> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename Element::Params> params);
// Defines a SplitInt64IntoElements specialization for element type T:
// consecutive groups of n int64 entries in a column vector become one
// ring element in coefficient form.
#define SPLIT64_FOR_TYPE(T) \
template<> \
Matrix<T> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, COEFFICIENT); \
size_t rows = other.GetRows() / n; \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int64_t> values(n); \
for (size_t i = 0; i < n; ++i) \
values[i] = other(row*n + i, 0); \
result(row, 0) = values; \
} \
return result; \
}
/**
 * Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template<typename Element>
Matrix<Element> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<typename Element::Params> params);
// Defines a SplitInt32AltIntoElements specialization for element type T:
// each row of n int32 entries becomes one ring element in coefficient form.
#define SPLIT32ALT_FOR_TYPE(T) \
template<> \
Matrix<T> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, COEFFICIENT); \
size_t rows = other.GetRows(); \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int32_t> values(n); \
for (size_t i = 0; i < n; ++i) \
values[i] = other(row, i); \
result(row, 0) = values; \
} \
return result; \
}
/**
 * Split a vector of int64_t into a vector of ring elements with ring dimension n
 *
 * @param &other the input matrix
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template<typename Element>
Matrix<Element> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename Element::Params> params);
// Defines a SplitInt64AltIntoElements specialization for element type T:
// each row of n int64 entries becomes one ring element in coefficient form.
#define SPLIT64ALT_FOR_TYPE(T) \
template<> \
Matrix<T> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \
auto zero_alloc = T::Allocator(params, COEFFICIENT); \
size_t rows = other.GetRows(); \
Matrix<T> result(zero_alloc, rows, 1); \
for (size_t row = 0; row < rows; ++row) { \
std::vector<int64_t> values(n); \
for (size_t i = 0; i < n; ++i) \
values[i] = other(row, i); \
result(row, 0) = values; \
} \
return result; \
}
}
#endif // LBCRYPTO_MATH_MATRIX_H
|
linear_algebra.c | /*
* linear_algebra.c
* simple code for matrix funcitons
* Brian J Gravelle
* ix.cs.uoregon.edu/~gravelle
* gravelle@cs.uoregon.edu
* See LICENSE file for licensing information and boring legal stuff
* If by some miricale you find this software useful, thanks are accepted in
* the form of chocolate, coffee, or introductions to potential employers.
*/
#include "linear_algebra.h"
#include <math.h>
// Element-wise matrix addition: mat_c = mat_a + mat_b.
// pre:  all three matrices are allocated with identical dimensions
//       (rows x cols); mat_c's prior contents are overwritten.
// post: mat_c[r][c] == mat_a[r][c] + mat_b[r][c] for every entry.
void add_matrix(double** mat_a, int rows, int cols, double** mat_b, double** mat_c) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            mat_c[r][c] = mat_a[r][c] + mat_b[r][c];
        }
    }
}
// Naive serial matrix multiply: mat_c = mat_a * mat_b.
// pre:  mat_a is rows_a x cols_a, mat_b is cols_a x cols_b (NOT
//       transposed), mat_c is allocated rows_a x cols_b; mat_c's
//       prior contents are ignored.
// post: mat_c holds the product of mat_a and mat_b.
void multiply_matrix_s(double** mat_a, int rows_a, int cols_a,
                       double** mat_b, int cols_b,
                       double** mat_c) {
    for (int i = 0; i < rows_a; i++) {
        for (int j = 0; j < cols_b; j++) {
            // accumulate the dot product of row i of a and column j of b
            double sum = 0;
            for (int k = 0; k < cols_a; k++) {
                sum += mat_a[i][k] * mat_b[k][j];
            }
            mat_c[i][j] = sum;
        }
    }
}
//multiply matrices together -- baseline ("default") i/j/k loop order
//(the original header comment claimed the j and k loops were flipped;
// this is the un-flipped reference version)
//pre all matrices are initialized; mat_c MUST be zero-initialized,
//    because results are accumulated into it (c[i][j] = c[i][j] + ...)
// mat b is NOT transposed
// rows in b == cols in a
// c is initialized to the same size as b
//post mat_c has the result of multiplying mat_a and mat_b
//NOTE: LIKWID_MARKER_* / CALI_MARK_* are optional profiling hooks,
//      compiled in only when the corresponding macros are defined.
void multiply_matrix_d(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
double** restrict __attribute__((aligned (64))) mat_c) {
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_START("default_mm");
}
#endif
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("default_mm");
#endif
#pragma omp parallel
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("default_mm");
#endif
// parallelized over rows of a; each thread owns distinct rows of c
#pragma omp for
for (int i = 0; i < rows_a; i++ ) {
for (int j = 0; j < cols_b; j++) {
// #pragma omp simd
for (int k = 0; k < cols_a; k++)
mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[k][j];
}
} // i
#ifdef USE_CALI_REG
CALI_MARK_END("default_mm");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("default_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_STOP("default_mm");
}
#endif
}
//multiply matrices together
//this version flips the i and j loops (columns of c outermost) in an
//attempt to improve vectorization
//pre all matrices are initialized; mat_c MUST be zero-initialized,
//    because results are accumulated into it
// mat b is NOT transposed
// rows in b == cols in a
// c is initialized to the same size as b
//post mat_c has the result of multiplying mat_a and mat_b
void multiply_matrix_i(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
double** restrict __attribute__((aligned (64))) mat_c) {
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_START("interchange_mm");
}
#endif
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("interchange_mm");
#endif
#pragma omp parallel
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("interchange_mm");
#endif
// parallelized over columns of c; each thread owns distinct columns,
// so concurrent updates of c never collide
#pragma omp for
for (int j = 0; j < cols_b; j++) {
for (int i = 0; i < rows_a; i++ ) {
// #pragma omp simd
for (int k = 0; k < cols_a; k++)
mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[k][j];
}
}
#ifdef USE_CALI_REG
CALI_MARK_END("interchange_mm");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("interchange_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_STOP("interchange_mm");
}
#endif
}
//multiply matrices together
//this version expects b to be transposed for cacheing reasons, so both
//operands are read with unit stride in the inner loop (mat_b[j][k])
//pre all matrices are initialized; mat_c MUST be zero-initialized,
//    because results are accumulated into it
// mat b is transposed
// rows in b == cols in a
// c is initialized to the same size as b
//post mat_c has the result of multiplying mat_a and (the un-transposed) mat_b
void multiply_matrix_t(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
double** restrict __attribute__((aligned (64))) mat_c) {
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_START("transpose_mm");
}
#endif
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("transpose_mm");
#endif
#pragma omp parallel
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("transpose_mm");
#endif
// parallelized over rows of a; each thread owns distinct rows of c
#pragma omp for
for (int i = 0; i < rows_a; i++ ) {
for (int j = 0; j < cols_b; j++) {
// #pragma omp simd
for (int k = 0; k < cols_a; k++)
mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[j][k];
}
} // i
#ifdef USE_CALI_REG
CALI_MARK_END("transpose_mm");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("transpose_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_STOP("transpose_mm");
}
#endif
}
// Matrix multiply with the j loop unrolled-and-jammed by 8.
// pre:  mat_a is rows_a x cols_a; mat_b IS TRANSPOSED (cols_b x cols_a),
//       so the product uses mat_b[j][k]; mat_c MUST be zero-initialized
//       (results are accumulated into it).
// post: mat_c = mat_a * b (where b is mat_b un-transposed).
// Fix:  the remainder loop's first clause was the no-op expression
//       statement `for (j; ...)`; it is now an empty clause `for (; ...)`
//       so the retained value of j is used intentionally and the
//       "statement with no effect" warning is gone. Behavior unchanged.
void multiply_matrix_uj(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
                        double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
                        double** restrict __attribute__((aligned (64))) mat_c) {
#ifdef USE_LIKWID
#pragma omp parallel
    {
        LIKWID_MARKER_START("unrolljam_mm");
    }
#endif
#ifdef USE_CALI_UNCORE
    CALI_MARK_BEGIN("unrolljam_mm");
#endif
#pragma omp parallel
    {
#ifdef USE_CALI_REG
        CALI_MARK_BEGIN("unrolljam_mm");
#endif
        // parallelized over rows of a; each thread owns distinct rows of c
#pragma omp for
        for (int i = 0; i < rows_a; i++ ) {
            int j;
            // unrolled-by-8 main loop over columns of c
            for (j = 0; j < cols_b-8; j+=8) {
                // #pragma omp simd
                for (int k = 0; k < cols_a; k++){
                    mat_c[i][j]   = mat_c[i][j]   + mat_a[i][k] * mat_b[j][k];
                    mat_c[i][j+1] = mat_c[i][j+1] + mat_a[i][k] * mat_b[j+1][k];
                    mat_c[i][j+2] = mat_c[i][j+2] + mat_a[i][k] * mat_b[j+2][k];
                    mat_c[i][j+3] = mat_c[i][j+3] + mat_a[i][k] * mat_b[j+3][k];
                    mat_c[i][j+4] = mat_c[i][j+4] + mat_a[i][k] * mat_b[j+4][k];
                    mat_c[i][j+5] = mat_c[i][j+5] + mat_a[i][k] * mat_b[j+5][k];
                    mat_c[i][j+6] = mat_c[i][j+6] + mat_a[i][k] * mat_b[j+6][k];
                    mat_c[i][j+7] = mat_c[i][j+7] + mat_a[i][k] * mat_b[j+7][k];
                }
            }
            // remainder loop: j carries over from the unrolled loop
            for (; j < cols_b; j++) {
#pragma omp simd
                for (int k = 0; k < cols_a; k++)
                    mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[j][k];
            }
        } // i
#ifdef USE_CALI_REG
        CALI_MARK_END("unrolljam_mm");
#endif
    }
#ifdef USE_CALI_UNCORE
    CALI_MARK_END("unrolljam_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("unrolljam_mm");
    }
#endif
}
//multiply matrices together; blocking and b is transposed
//pre all matrices are initialized; mat_c MUST be zero-initialized,
//    because results are accumulated into it
// mat b is trnasposed
// rows in b == cols in a
// c is initialized to the same size as b
//post mat_c has the result of multipling mat_a and mat_b
//NOTE: BLOCK_ROWS/BLOCK_COLS and min() are expected from linear_algebra.h
//NOTE(review): the outer i/j/k/ii/jj/kk/iii/jjj/kkk declarations are
//   shadowed by the loop-scoped variables below and are effectively
//   unused; the private(...) clause refers to the outer (unused) copies.
//   Harmless, but a cleanup candidate.
void multiply_matrix_b(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
double** restrict __attribute__((aligned (64))) mat_c) {
int i, j, k;
int ii, jj, kk;
int iii, jjj, kkk;
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_START("block_mm");
}
#endif
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("block_mm");
#endif
#pragma omp parallel private(iii,jjj,ii,jj,i,j,k)
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("block_mm");
#endif
// parallelized over rows of a; j and k are blocked for cache reuse
#pragma omp for
for (int i = 0; i < rows_a; i++ ) {
for (int jjj = 0; jjj < cols_b; jjj = jjj + BLOCK_ROWS)
for (int j = jjj; j < min(cols_b, jjj + BLOCK_ROWS); j++)
for (int kkk = 0; kkk < cols_a; kkk = kkk + BLOCK_COLS) {
// #pragma omp simd
// #pragma omp simd aligned(mat_a, mat_b, mat_c: 64)
// #pragma clang loop vectorize_width(8)
for (int k = kkk; k < min(cols_a,kkk + BLOCK_COLS); k++)
mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[j][k];
}
} // i
#ifdef USE_CALI_REG
CALI_MARK_END("block_mm");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("block_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_STOP("block_mm");
}
#endif
}
// Matrix multiply combining cache blocking (BLOCK_ROWS/BLOCK_COLS tiles)
// with an 8-way unroll-and-jam of the j loop; mat_b is transposed.
// pre:  mat_a is rows_a x cols_a; mat_b IS TRANSPOSED (cols_b x cols_a);
//       mat_c MUST be zero-initialized (results are accumulated into it).
// post: mat_c = mat_a * b (where b is mat_b un-transposed).
// Fixes: the remainder loop's first clause was the no-op expression
//       statement `for (j; ...)` (now the empty clause `for (; ...)`),
//       and the truncated header comment is completed. Behavior unchanged.
void multiply_matrix_bu(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
                        double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
                        double** restrict __attribute__((aligned (64))) mat_c) {
    int i, j, k;
    int ii, jj, kk;
    int iii, jjj, kkk;
#ifdef USE_LIKWID
#pragma omp parallel
    {
        LIKWID_MARKER_START("block_mm");
    }
#endif
#ifdef USE_CALI_UNCORE
    CALI_MARK_BEGIN("block_mm");
#endif
#pragma omp parallel private(iii,jjj,ii,jj,i,j,k)
    {
#ifdef USE_CALI_REG
        CALI_MARK_BEGIN("block_mm");
#endif
        // j is shared between the unrolled loop and its remainder loop
        int j;
#pragma omp for
        for (int i = 0; i < rows_a; i++ ) {
            for (int jjj = 0; jjj < cols_b; jjj = jjj + BLOCK_ROWS) {
                // #pragma omp simd
                // unrolled-by-8 sweep over the j tile
                for (j = jjj; j < min(cols_b, jjj + BLOCK_ROWS)-8; j+=8) {
                    for (int kkk = 0; kkk < cols_a; kkk = kkk + BLOCK_COLS) {
                        for (int k = kkk; k < min(cols_a,kkk + BLOCK_COLS); k++){
                            mat_c[i][j]   = mat_c[i][j]   + mat_a[i][k] * mat_b[j][k];
                            mat_c[i][j+1] = mat_c[i][j+1] + mat_a[i][k] * mat_b[j+1][k];
                            mat_c[i][j+2] = mat_c[i][j+2] + mat_a[i][k] * mat_b[j+2][k];
                            mat_c[i][j+3] = mat_c[i][j+3] + mat_a[i][k] * mat_b[j+3][k];
                            mat_c[i][j+4] = mat_c[i][j+4] + mat_a[i][k] * mat_b[j+4][k];
                            mat_c[i][j+5] = mat_c[i][j+5] + mat_a[i][k] * mat_b[j+5][k];
                            mat_c[i][j+6] = mat_c[i][j+6] + mat_a[i][k] * mat_b[j+6][k];
                            mat_c[i][j+7] = mat_c[i][j+7] + mat_a[i][k] * mat_b[j+7][k];
                        }
                    }
                } // j
                // remainder loop: j carries over from the unrolled sweep
                for (; j < min(cols_b, jjj + BLOCK_ROWS); j++) {
                    for (int kkk = 0; kkk < cols_a; kkk = kkk + BLOCK_COLS) {
#pragma omp simd
                        for (int k = kkk; k < min(cols_a,kkk + BLOCK_COLS); k++)
                            mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[j][k];
                    }
                } // j rem
            } // jjj
        } // i
#ifdef USE_CALI_REG
        CALI_MARK_END("block_mm");
#endif
    }
#ifdef USE_CALI_UNCORE
    CALI_MARK_END("block_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("block_mm");
    }
#endif
}
//multiply matrices together; b is transposed and the
//pre all matrices are initialized; mat_c MUST be zero-initialized,
//    because results are accumulated into it
// mat b is trnasposed
// rows in b == cols in a
// c is initialized to the same size as b
//post mat_c has the result of multipling mat_a and mat_b
//NOTE(review): CHUNK_SIZE is #defined inside the function body below;
//   preprocessor definitions are not scoped, so it remains defined for
//   the rest of the translation unit.
void multiply_matrix_bi(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
double** restrict __attribute__((aligned (64))) mat_c) {
int i, j, k;
int ii, jj, kk;
int iii, jjj, kkk;
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_START("block_mm");
}
#endif
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("block_mm");
#endif
#pragma omp parallel private(iii,jjj,ii,jj,i,j,k)
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("block_mm");
#endif
// dynamic scheduling: rows are handed out in chunks of CHUNK_SIZE
#define CHUNK_SIZE 256
#pragma omp for schedule(dynamic, CHUNK_SIZE)
for (int i = 0; i < rows_a; i++ ) {
for (int jjj = 0; jjj < cols_b; jjj = jjj + BLOCK_ROWS) {
for (int kkk = 0; kkk < cols_a; kkk = kkk + BLOCK_COLS) {
for (int j = jjj; j < min(cols_b, jjj + BLOCK_ROWS); j++) {
// #pragma omp simd aligned(mat_a, mat_b, mat_c: 64)
// #pragma clang loop vectorize_width(8)
// #pragma omp simd
for (int k = kkk; k < min(cols_a,kkk + BLOCK_COLS); k++) {
mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[j][k];
} // k
} // j
} // kkk
} // jjj
} // i
#ifdef USE_CALI_REG
CALI_MARK_END("block_mm");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("block_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_STOP("block_mm");
}
#endif
}
//multiply matrices together
//this version makes k the outermost loop (blocked), with i innermost
//pre all matrices are initialized; mat_c MUST be zero-initialized,
//    because results are accumulated into it
// mat b is trnasposed
// rows in b == cols in a
// c is initialized to the same size as b
//post mat_c has the result of multipling mat_a and mat_b
//NOTE(review): DATA RACE with more than one thread. The `omp for`
//   below distributes kkk iterations across threads, but every kkk
//   iteration performs read-modify-write updates on the SAME mat_c[i][j]
//   elements (the c index does not depend on kkk). Concurrent
//   unsynchronized updates make the result nondeterministic. Fixing this
//   requires restructuring (parallelize over i or j, or use atomics/
//   per-thread partial sums), so it is only flagged here.
void multiply_matrix_f(double** restrict __attribute__((aligned (64))) mat_a, int rows_a, int cols_a,
double** restrict __attribute__((aligned (64))) mat_b, int cols_b,
double** restrict __attribute__((aligned (64))) mat_c) {
int i, j, k;
int ii, jj, kk;
int iii, jjj, kkk;
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_START("block_mm");
}
#endif
#ifdef USE_CALI_UNCORE
CALI_MARK_BEGIN("block_mm");
#endif
#pragma omp parallel private(iii,jjj,ii,jj,i,j,k)
{
#ifdef USE_CALI_REG
CALI_MARK_BEGIN("block_mm");
#endif
#pragma omp for
for (int kkk = 0; kkk < cols_a; kkk = kkk + BLOCK_COLS)
for (int k = kkk; k < min(cols_a,kkk + BLOCK_COLS); k++)
for (int jjj = 0; jjj < cols_b; jjj = jjj + BLOCK_ROWS)
// #pragma omp simd
for (int j = jjj; j < min(cols_b, jjj + BLOCK_ROWS); j++)
for (int i = 0; i < rows_a; i++ ) {
mat_c[i][j] = mat_c[i][j] + mat_a[i][k] * mat_b[j][k];
} // i
#ifdef USE_CALI_REG
CALI_MARK_END("block_mm");
#endif
}
#ifdef USE_CALI_UNCORE
CALI_MARK_END("block_mm");
#endif
#ifdef USE_LIKWID
#pragma omp parallel
{
LIKWID_MARKER_STOP("block_mm");
}
#endif
}
// Matrix transpose: mat_c = mat_a^T.
// pre:  mat_a is rows_a x cols_a; mat_c is allocated cols_a x rows_a
//       (its prior contents are overwritten).
// post: mat_c[c][r] == mat_a[r][c] for every entry.
void transpose_matrix(double** mat_a, int rows_a, int cols_a, double** mat_c) {
#pragma omp parallel for
    for (int r = 0; r < rows_a; r++) {
        for (int c = 0; c < cols_a; c++) {
            mat_c[c][r] = mat_a[r][c];
        }
    }
}
// Fill a matrix with zeros.
// pre:  mat_a has been allocated rows_a x cols_a.
// post: every entry of mat_a is 0.0.
void set_zero(double** mat_a, int rows_a, int cols_a) {
    for (int r = 0; r < rows_a; r++) {
        double* row = mat_a[r];
        for (int c = 0; c < cols_a; c++) {
            row[c] = 0.0;
        }
    }
}
// Fill a matrix with the identity pattern.
// pre:  mat_a has been allocated rows_a x cols_a (need not be square).
// post: mat_a has 1.0 on the main diagonal and 0.0 elsewhere.
void set_identity(double** mat_a, int rows_a, int cols_a) {
    for (int r = 0; r < rows_a; r++) {
        for (int c = 0; c < cols_a; c++) {
            mat_a[r][c] = (r == c) ? 1.0 : 0.0;
        }
    }
}
//deep copy of a to b
//NOTE(review): despite the comment, this copies ROW POINTERS
//   (mat_c[i] = mat_a[i] where both are double**), i.e. a SHALLOW copy:
//   after the call, mat_c's rows alias mat_a's storage. Worse, if
//   total_elms means rows*cols (as the name suggests), the loop indexes
//   the row-pointer arrays far past their length — out-of-bounds.
//   A real deep copy needs the row/col counts (or a guaranteed
//   contiguous layout) — confirm intent with callers before fixing.
void copy_mat(double** mat_a, double** mat_c, int total_elms) {
int i;
for (i = 0; i < total_elms; i++)
mat_c[i] = mat_a[i];
}
// Print a matrix to stdout, one row per line ("%.4f " per entry),
// with a blank line after each row.
void print_matrix(double** mat_a, int rows_a, int cols_a) {
    for (int r = 0; r < rows_a; r++) {
        for (int c = 0; c < cols_a; c++) {
            printf("%.4f ", mat_a[r][c]);
        }
        printf("\n\n");
    }
}
// Returns the absolute value of a.
// The previous branchless expression (((a < 0.) * -2.) + 1.) * a was an
// obfuscated fabs(); use the standard-library call instead (<math.h> is
// already included at the top of this file).
double get_abs(double a) {
    return fabs(a);
}
|
GB_unop__ainv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_bool_bool)
// op(A') function: GB (_unop_tran__ainv_bool_bool)
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply AINV to a bool array. As generated, the
// boolean specialization reduces to a plain copy (cij = aij).
// (This file is auto-generated — comments only; code untouched.)
GrB_Info GB (_unop_apply__ainv_bool_bool)
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply AINV (bool copy).
// The whole algorithm body is textually included from the shared
// template GB_unop_transpose.c, which uses the GB_* macros above.
GrB_Info GB (_unop_tran__ainv_bool_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
grid.c | /* Copyright 2014-2015 The Regents of the University of California.
* Copyright 2015-2019 Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* 2011-2019 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2014 Frank Ong <frankong@berkeley.edu>
*/
#include <math.h>
#include <complex.h>
#include <assert.h>
#include <string.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/specfun.h"
#include "misc/nested.h"
#include "misc/misc.h"
#include "grid.h"
// Kaiser-Bessel window at x, normalized so that kb(beta, 0) == 1.
// Zero outside the support |x| >= 0.5.
static double kb(double beta, double x)
{
	if (fabs(x) >= 0.5)
		return 0.;

	double arg = beta * sqrt(1. - pow(2. * x, 2.));
	return bessel_i0(arg) / bessel_i0(beta);
}
// Tabulate the Kaiser-Bessel window at n + 1 points for intlookup().
// NOTE(review): sample spacing uses (n - 1), matching the (n - 1)
// scaling inside intlookup() — confirm the extra table entries past
// the window support are intended.
static void kb_precompute(double beta, int n, float table[n + 1])
{
	int i = 0;

	while (i <= n) {

		table[i] = kb(beta, (double)(i) / (double)(n - 1) / 2.);
		i++;
	}
}
// Fourier transform of the Kaiser-Bessel window (up to a constant
// bessel_i0(beta) factor, deliberately omitted — see comment below).
static double ftkb(double beta, double x)
{
	double a = sqrt(pow(beta, 2.) - pow(M_PI * x, 2.));

	if (0. == a)
		return 1.;

	return a / sinh(a); // * bessel_i0(beta);
}
// Roll-off correction factor, normalized so rolloff(0., beta, width) == 1.
static double rolloff(double x, double beta, double width)
{
	double num = ftkb(beta, x * width);
	double den = ftkb(beta, 0.);

	return num / den;
}
// Linear interpolation: the value a fraction t of the way from x0 to x1.
static float lerp(float x0, float x1, float t)
{
	return (1. - t) * x0 + t * x1;
}
// Linear interpolation look up: maps x in [0, 1] into the precomputed
// table of n + 1 samples and interpolates between the two neighbors.
//
// Fixes: the second post-condition assert was the tautology
// `assert(0 >= 0.)`; mirroring `assert(l <= 1.)` it clearly was meant to
// bound the interpolated value, so it now checks `l >= 0.`. The tiny
// lerp() helper is inlined with the identical expression, so the
// arithmetic (and results) are unchanged.
static float intlookup(int n, const float table[n + 1], float x)
{
	float fpart;

	// fpart = modff(x * n, &ipart);
	// int index = ipart;

	int index = (int)(x * (n - 1));
	fpart = x * (n - 1) - (float)index;

#if 1
	assert(index >= 0);
	assert(index <= n);
	assert(fpart >= 0.);
	assert(fpart <= 1.);
#endif
	// linear interpolation between the two neighboring table entries
	float l = (1. - fpart) * table[index] + fpart * table[index + 1];
#if 1
	assert(l <= 1.);
	assert(l >= 0.);
#endif
	return l;
}
// Lazily-initialized Kaiser-Bessel lookup table shared by grid()/gridH().
// kb_beta == -1. marks "not yet computed"; initialization happens inside
// an (unnamed) omp critical section in the callers, and both callers
// assert the cached beta matches their configuration.
enum { kb_size = 100 };
static float kb_table[kb_size + 1];
static float kb_beta = -1.;
// Adjoint ("H") gridding: for each of the ksp_dims[1]*ksp_dims[2]
// trajectory samples, gather coil values from the Cartesian grid at the
// oversampled, re-centered sample position via grid_pointH() and
// accumulate them into dst (dst is accumulated into, not overwritten).
// NOTE(review): grid_pointH() is defined elsewhere — presumably the
// adjoint of grid_point(); confirm in grid.h.
void gridH(const struct grid_conf_s* conf, const complex float* traj, const long ksp_dims[4], complex float* dst, const long grid_dims[4], const complex float* grid)
{
long C = ksp_dims[3];
// precompute kaiser bessel table
// (one-time init guarded by the critical section; the table is cached
// globally, so a later call with a different beta trips the assert)
#pragma omp critical
if (-1 == kb_beta) {
kb_precompute(conf->beta, kb_size, kb_table);
kb_beta = conf->beta;
}
assert(fabs(kb_beta - conf->beta) < 1.E-6);
assert(1 == ksp_dims[0]);
long samples = ksp_dims[1] * ksp_dims[2];
#pragma omp parallel for
for(int i = 0; i < samples; i++) {
float pos[3];
// oversample and shift into grid-centered coordinates
pos[0] = conf->os * (creal(traj[i * 3 + 0]));
pos[1] = conf->os * (creal(traj[i * 3 + 1]));
pos[2] = conf->os * (creal(traj[i * 3 + 2]));
pos[0] += (grid_dims[0] > 1) ? ((float)grid_dims[0] / 2.) : 0.;
pos[1] += (grid_dims[1] > 1) ? ((float)grid_dims[1] / 2.) : 0.;
pos[2] += (grid_dims[2] > 1) ? ((float)grid_dims[2] / 2.) : 0.;
// per-coil accumulator for this sample
complex float val[C];
for (int j = 0; j < C; j++)
val[j] = 0.0;
grid_pointH(C, 3, grid_dims, pos, val, grid, conf->periodic, conf->width, kb_size, kb_table);
for (int j = 0; j < C; j++)
dst[j * samples + i] += val[j];
}
}
// Forward gridding: spread every non-Cartesian k-space sample in src
// (all C channels) onto the Cartesian grid using the Kaiser-Bessel kernel.
void grid(const struct grid_conf_s* conf, const complex float* traj, const long grid_dims[4], complex float* grid, const long ksp_dims[4], const complex float* src)
{
	long C = ksp_dims[3];

	// precompute kaiser bessel table
	#pragma omp critical
	if (-1 == kb_beta) {

		kb_precompute(conf->beta, kb_size, kb_table);
		kb_beta = conf->beta;
	}

	// cached table is only valid for one beta per process; enforce that
	assert(fabs(kb_beta - conf->beta) < 1.E-6);

	assert(1 == ksp_dims[0]);

	long samples = ksp_dims[1] * ksp_dims[2];

	// grid
	#pragma omp parallel for
	for(int i = 0; i < samples; i++) {

		// oversampled trajectory coordinates, shifted to grid center
		float pos[3];
		pos[0] = conf->os * (creal(traj[i * 3 + 0]));
		pos[1] = conf->os * (creal(traj[i * 3 + 1]));
		pos[2] = conf->os * (creal(traj[i * 3 + 2]));

		pos[0] += (grid_dims[0] > 1) ? ((float) grid_dims[0] / 2.) : 0.;
		pos[1] += (grid_dims[1] > 1) ? ((float) grid_dims[1] / 2.) : 0.;
		pos[2] += (grid_dims[2] > 1) ? ((float) grid_dims[2] / 2.) : 0.;

		complex float val[C];
		for (int j = 0; j < C; j++)
			val[j] = src[j * samples + i];

		grid_point(C, 3, grid_dims, pos, grid, val, conf->periodic, conf->width, kb_size, kb_table);
	}
}
// Consistency checks on trajectory / k-space / grid dimensions for
// grid2()/grid2H(); dims 0..2 are spatial, dim 3 holds channels.
static void grid2_dims(unsigned int D, const long trj_dims[D], const long ksp_dims[D], const long grid_dims[D])
{
	assert(D >= 4);
	assert(md_check_compat(D - 3, ~0, grid_dims + 3, ksp_dims + 3));
	// assert(md_check_compat(D - 3, ~(MD_BIT(0) | MD_BIT(1)), trj_dims + 3, ksp_dims + 3));
	assert(md_check_bounds(D - 3, ~0, trj_dims + 3, ksp_dims + 3));

	assert(3 == trj_dims[0]);	// three coordinates per trajectory point
	assert(1 == trj_dims[3]);
	assert(1 == ksp_dims[0]);
}
// Multi-dimensional forward gridding: iterate over the non-spatial
// dimensions (>= 4) and call grid() for each slice.
void grid2(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long grid_dims[D], complex float* dst, const long ksp_dims[D], const complex float* src)
{
	grid2_dims(D, trj_dims, ksp_dims, grid_dims);

	long ksp_strs[D];
	md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE);

	long trj_strs[D];
	md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE);

	long grid_strs[D];
	md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE);

	long pos[D];
	for (unsigned int i = 0; i < D; i++)
		pos[i] = 0;

	do {
		grid(conf, &MD_ACCESS(D, trj_strs, pos, traj),
			grid_dims, &MD_ACCESS(D, grid_strs, pos, dst),
			ksp_dims, &MD_ACCESS(D, ksp_strs, pos, src));

	} while(md_next(D, ksp_dims, (~0 ^ 15), pos));	// skip dims 0-3 (handled inside grid())
}
// Multi-dimensional adjoint gridding: iterate over the non-spatial
// dimensions (>= 4) and call gridH() for each slice.
void grid2H(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long ksp_dims[D], complex float* dst, const long grid_dims[D], const complex float* src)
{
	grid2_dims(D, trj_dims, ksp_dims, grid_dims);

	long ksp_strs[D];
	md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE);

	long trj_strs[D];
	md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE);

	long grid_strs[D];
	md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE);

	long pos[D];
	for (unsigned int i = 0; i < D; i++)
		pos[i] = 0;

	do {
		gridH(conf, &MD_ACCESS(D, trj_strs, pos, traj),
			ksp_dims, &MD_ACCESS(D, ksp_strs, pos, dst),
			grid_dims, &MD_ACCESS(D, grid_strs, pos, src));

	} while(md_next(D, ksp_dims, (~0 ^ 15), pos));	// skip dims 0-3 (handled inside gridH())
}
// Callback invoked for every grid cell touched by a sample:
// 'ind' is the flattened grid index, 'd' the accumulated kernel weight.
typedef void CLOSURE_TYPE(grid_update_t)(long ind, float d);

#ifndef __clang__
#define VLA(x) x
#else
// blocks extension does not play well even with arguments which
// just look like variably-modified types
#define VLA(x)
#endif
// Generic gridding kernel: visit every grid cell within 'width' of pos[]
// along each of the N axes, compute the separable Kaiser-Bessel weight by
// table lookup, and invoke 'update' with the flattened index and weight.
static void grid_point_gen(int N, const long dims[VLA(N)], const float pos[VLA(N)], bool periodic, float width, int kb_size, const float kb_table[VLA(kb_size + 1)], grid_update_t update)
{
#ifndef __clang__
	int sti[N];
	int eni[N];
	int off[N];
#else
	// blocks extension does not play well with variably-modified types
	int* sti = alloca(sizeof(int[N]));
	int* eni = alloca(sizeof(int[N]));
	int* off = alloca(sizeof(int[N]));
#endif

	for (int j = 0; j < N; j++) {

		// integer index range of cells inside the kernel support
		sti[j] = (int)ceil(pos[j] - width);
		eni[j] = (int)floor(pos[j] + width);
		off[j] = 0;

		// empty support on any axis: nothing to do
		if (sti[j] > eni[j])
			return;

		if (!periodic) {

			// clip the support to the grid boundaries
			sti[j] = MAX(sti[j], 0);
			eni[j] = MIN(eni[j], dims[j] - 1);

		} else {

			// make indices non-negative; they are wrapped by % dims[j] below
			while (sti[j] + off[j] < 0)
				off[j] += dims[j];
		}

		if (1 == dims[j]) {

			assert(0. == pos[j]); // ==0. fails nondeterministically for test_nufft_forward bbdec08cb

			sti[j] = 0;
			eni[j] = 0;
		}
	}

	// recursive traversal over the N-dimensional support box,
	// multiplying in the per-axis kernel weight at each level
	__block NESTED(void, grid_point_r, (int N, long ind, float d)) // __block for recursion
	{
		if (0 == N) {

			NESTED_CALL(update, (ind, d));

		} else {

			N--;

			for (int w = sti[N]; w <= eni[N]; w++) {

				float frac = fabs(((float)w - pos[N]));
				float d2 = d * intlookup(kb_size, kb_table, frac / width);
				long ind2 = (ind * dims[N] + ((w + off[N]) % dims[N]));

				grid_point_r(N, ind2, d2);
			}
		}
	};

	grid_point_r(N, 0, 1.);
}
// Spread one sample's channel values val[] onto the Cartesian grid dst
// ('gridding'); channels are stored contiguously, each dims[0]*dims[1]*dims[2]
// elements long.
void grid_point(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float* dst, const complex float val[VLA(ch)], bool periodic, float width, int kb_size, const float kb_table[kb_size + 1])
{
	NESTED(void, update, (long ind, float d))
	{
		for (unsigned int c = 0; c < ch; c++) {

			// we are allowed to update real and imaginary part independently which works atomically
			#pragma omp atomic
			__real(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __real(val[c]) * d;
			#pragma omp atomic
			__imag(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __imag(val[c]) * d;
		}
	};

	grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update);
}
// Adjoint of grid_point(): gather weighted grid values from src into the
// per-sample channel accumulator val[].
// NOTE(review): the atomics mirror grid_point(); since val[] appears to be
// per-sample (see gridH), they may be unnecessary here — confirm before removing.
void grid_pointH(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float val[VLA(ch)], const complex float* src, bool periodic, float width, int kb_size, const float kb_table[kb_size + 1])
{
	NESTED(void, update, (long ind, float d))
	{
		for (unsigned int c = 0; c < ch; c++) {

			// we are allowed to update real and imaginary part independently which works atomically
			#pragma omp atomic
			__real(val[c]) += __real(src[ind + c * dims[0] * dims[1] * dims[2]]) * d;
			#pragma omp atomic
			__imag(val[c]) += __imag(src[ind + c * dims[0] * dims[1] * dims[2]]) * d;
		}
	};

	grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update);
}
// Kaiser-Bessel shape parameter beta for oversampling factor os and
// kernel width (presumably following the standard gridding literature —
// TODO confirm reference).
double calc_beta(float os, float width)
{
	double w = width * 2. / os;

	return M_PI * sqrt(pow(w * (os - 0.5), 2.) - 0.8);
}
// Normalized, centered coordinate of sample i on an axis of length d,
// ranging over [-0.5, 0.5); a singleton axis (d == 1) maps to 0.
static float pos(int d, int i)
{
	if (1 == d)
		return 0.;

	return ((float)i - (float)d / 2.) / (float)d;
}
// Precompute the deapodization (roll-off correction) weights for a grid of
// the given dimensions: the separable product of the 1D Kaiser-Bessel
// roll-off evaluated along each axis.
void rolloff_correction(float os, float width, float beta, const long dimensions[3], complex float* dst)
{
	#pragma omp parallel for collapse(3)
	for (int z = 0; z < dimensions[2]; z++)
		for (int y = 0; y < dimensions[1]; y++)
			for (int x = 0; x < dimensions[0]; x++)
				dst[x + dimensions[0] * (y + z * dimensions[1])]
					= rolloff(os * pos(dimensions[0], x), beta, width)
					* rolloff(os * pos(dimensions[1], y), beta, width)
					* rolloff(os * pos(dimensions[2], z), beta, width);
}
/* ======================= mclib.c ======================= */
#include "mcrat.h"
//define constants
// Physical constants in CGS units: radiation constant, speed of light,
// Planck constant, fine-structure constant, electron charge (esu),
// Boltzmann constant, proton mass, Thomson cross section, electron mass,
// and classical electron radius.
const double A_RAD=7.56e-15, C_LIGHT=2.99792458e10, PL_CONST=6.6260755e-27, FINE_STRUCT=7.29735308e-3, CHARGE_EL= 4.8032068e-10;
const double K_B=1.380658e-16, M_P=1.6726231e-24, THOM_X_SECT=6.65246e-25, M_EL=9.1093879e-28 , R_EL=2.817941499892705e-13;
// Inject thermal photons into the hydro grid: find all fluid elements within
// a radial shell of width c/fps around r_inj and within [theta_min, theta_max],
// draw a Poisson photon count per element (adjusting the photon weight until
// the total lands in [min_photons, max_photons]), then sample each photon's
// comoving frequency (Wien if spect=='w', else blackbody), direction, and
// position, and boost it to the lab frame.
// Outputs: *ph (newly malloc'd photon array — caller owns it), *ph_num (count).
void photonInjection(struct photon **ph, int *ph_num, double r_inj, double ph_weight, int min_photons, int max_photons, char spect, double theta_min, double theta_max, struct hydro_dataframe *hydro_data, gsl_rng * rand, FILE *fPtr)
{
	int i=0, block_cnt=0, *ph_dens=NULL, ph_tot=0, j=0,k=0;
	double ph_dens_calc=0.0, fr_dum=0.0, y_dum=0.0, yfr_dum=0.0, fr_max=0, bb_norm=0, position_phi, ph_weight_adjusted, rmin, rmax;
	double com_v_phi, com_v_theta, *p_comv=NULL, *boost=NULL; //comoving phi, theta, comoving 4 momentum for a photon, and boost for photon(to go to lab frame)
	double *l_boost=NULL; //pointer to hold array of lorentz boost, to lab frame, values
	float num_dens_coeff;
	double r_grid_innercorner=0, r_grid_outercorner=0, theta_grid_innercorner=0, theta_grid_outercorner=0;
	double position_rand=0, position2_rand=0, position3_rand=0, cartesian_position_rand_array[3];

	// number-density coefficient depends on the chosen spectrum
	if (spect=='w') //from MCRAT paper, w for wien spectrum
	{
		num_dens_coeff=8.44;
		//printf("in wien spectrum\n");
	}
	else
	{
		num_dens_coeff=20.29; //this is for black body spectrum
		//printf("in BB spectrum");
	}

	//find how many blocks are near the injection radius within the angles defined in mc.par, get temperatures and calculate number of photons to allocate memory for
	//and then rcord which blocks have to have "x" amount of photons injected there
	rmin=r_inj - 0.5*C_LIGHT/hydro_data->fps;
	rmax=r_inj + 0.5*C_LIGHT/hydro_data->fps;

	// pass 1: count how many fluid elements fall inside the injection shell
	for(i=0; i<hydro_data->num_elements; i++)
	{
		#if DIMENSIONS == THREE
			//want inner corner to be close to origin, therfore ned to have abs for 3D cartesian with negative coordinates, shouldnt affect the other geometry systems since theyre all defined from r=0, theta=0, phi=0
			//hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]-0.5*(hydro_data->r2_size)[i]);
			//hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]+0.5*(hydro_data->r2_size)[i]);
			//therefore do whats below
			hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, fabs((hydro_data->r0)[i])-0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])-0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])-0.5*(hydro_data->r2_size)[i]);
			hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, fabs((hydro_data->r0)[i])+0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])+0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])+0.5*(hydro_data->r2_size)[i]);
		#else
			hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], 0);
			hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], 0);
		#endif

		//look at all boxes in width delta r=c/fps and within angles we are interested in
		//if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max) && ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1_size)[i]<0.09))
		if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max))
		{
			//&& ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1)[i]<3.0*3.14/180) is just for testing sph_3d mcrat sim to see if block_cnt is the issue for the 200x normalization issue -> this fixed norm issue, not N_scatt issue when start at frame 0
			// also try injecting photons in frame 1 without above conditions -> didnt fix normalization issue not N_scatt issue
			// also try inj at frame 1 with scale 1e11 -> didnt fixed normalization issue not N_scatt issue
			// also try inj at frame 0 (orig) to see what gets printed for diagnosing CHOMBO refinement levels being an issue
			// try inj at frame 0 with modified if statement and L scale 1e11
			block_cnt++;
			//#if DIMENSIONS == THREE
			//fprintf(fPtr,"rmin %e rmax %e thetamin %e thetamax %e hydro: r0 %e r1 %e r2 %e r0_size %e r1_size %e r2_size %e r_inner %e theta_inner %e r_outer %e theta_outer %e\n", rmin, rmax, theta_min, theta_max, (hydro_data->r0)[i], (hydro_data->r1)[i], (hydro_data->r2)[i], (hydro_data->r0_size)[i], (hydro_data->r1_size)[i], (hydro_data->r2_size)[i], r_grid_innercorner, theta_grid_innercorner, r_grid_outercorner, theta_grid_outercorner);
			//#else
			//fprintf(fPtr,"rmin %e rmax %e thetamin %e thetamax %e hydro: r0 %e r1 %e r0_size %e r1_size %e r_inner %e theta_inner %e r_outer %e theta_outer %e dens %e\n", rmin, rmax, theta_min, theta_max, (hydro_data->r0)[i], (hydro_data->r1)[i], (hydro_data->r0_size)[i], (hydro_data->r1_size)[i], r_grid_innercorner, theta_grid_innercorner, r_grid_outercorner, theta_grid_outercorner, (hydro_data->dens)[i]);
			//#endif
			//fflush(fPtr);
		}
	}
	//printf("Blocks: %d\n", block_cnt);

	//allocate memory to record density of photons for each block
	// NOTE(review): the malloc result is not checked before use
	ph_dens=malloc(block_cnt * sizeof(int));

	//calculate the photon density for each block and save it to the array
	// pass 2: draw Poisson photon counts per element, re-scaling the photon
	// weight until the total count falls within [min_photons, max_photons]
	j=0;
	ph_tot=0;
	ph_weight_adjusted=ph_weight;
	//printf("%d %d\n", max_photons, min_photons);
	while ((ph_tot>max_photons) || (ph_tot<min_photons) )
	{
		j=0;
		ph_tot=0;
		for (i=0;i<hydro_data->num_elements;i++)
		{
			//printf("%d\n",i);
			//printf("%e, %e, %e, %e, %e, %e\n", *(r+i),(r_inj - C_LIGHT/fps), (r_inj + C_LIGHT/fps), *(theta+i) , theta_max, theta_min);
			#if DIMENSIONS == THREE
				//want inner corner to be close to origin, therfore ned to have abs for 3D cartesian with negative coordinates, shouldnt affect the other geometry systems since theyre all defined from r=0, theta=0, phi=0
				//hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]-0.5*(hydro_data->r2_size)[i]);
				//hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]+0.5*(hydro_data->r2_size)[i]);
				//therefore do whats below
				hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, fabs((hydro_data->r0)[i])-0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])-0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])-0.5*(hydro_data->r2_size)[i]);
				hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, fabs((hydro_data->r0)[i])+0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])+0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])+0.5*(hydro_data->r2_size)[i]);
			#else
				hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], 0);
				hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], 0);
			#endif

			//if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max) && ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1_size)[i]<0.09))
			if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max))
			{
				ph_dens_calc=(4.0/3.0)*hydroElementVolume(hydro_data, i) *(((hydro_data->gamma)[i]*num_dens_coeff*(hydro_data->temp)[i]*(hydro_data->temp)[i]*(hydro_data->temp)[i])/ph_weight_adjusted); //4 comes from L \propto 4p in the limit radiation pressure is greater than the matter energy density and 3 comes from p=u/3, where u is the energy density

				(*(ph_dens+j))=gsl_ran_poisson(rand,ph_dens_calc) ; //choose from poission distribution with mean of ph_dens_calc

				//printf("%d, %lf \n",*(ph_dens+j), ph_dens_calc);

				//sum up all the densities to get total number of photons
				ph_tot+=(*(ph_dens+j));

				j++;
			}
		}

		if (ph_tot>max_photons)
		{
			//if the number of photons is too big make ph_weight larger
			ph_weight_adjusted*=10;
		}
		else if (ph_tot<min_photons)
		{
			ph_weight_adjusted*=0.5;
		}
		//printf("dens: %d, photons: %d\n", *(ph_dens+(j-1)), ph_tot);
	}
	//printf("%d\n", ph_tot);

	//allocate memory for that many photons and also allocate memory to hold comoving 4 momentum of each photon and the velocity of the fluid
	(*ph)=malloc (ph_tot * sizeof (struct photon ));
	p_comv=malloc(4*sizeof(double));
	boost=malloc(3*sizeof(double));
	l_boost=malloc(4*sizeof(double));

	//go through blocks and assign random energies/locations to proper number of photons
	// pass 3: for each selected element, sample each photon's frequency via
	// rejection sampling, its direction isotropically in the comoving frame,
	// boost to the lab frame, and place it randomly inside the element
	ph_tot=0;
	k=0;
	for (i=0;i<hydro_data->num_elements;i++)
	{
		#if DIMENSIONS == THREE
			//want inner corner to be close to origin, therfore ned to have abs for 3D cartesian with negative coordinates, shouldnt affect the other geometry systems since theyre all defined from r=0, theta=0, phi=0
			//hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]-0.5*(hydro_data->r2_size)[i]);
			//hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]+0.5*(hydro_data->r2_size)[i]);
			//therefore do whats below
			hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, fabs((hydro_data->r0)[i])-0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])-0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])-0.5*(hydro_data->r2_size)[i]);
			hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, fabs((hydro_data->r0)[i])+0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])+0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])+0.5*(hydro_data->r2_size)[i]);
		#else
			hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], 0);
			hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], 0);
		#endif

		//if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max) && ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1_size)[i]<0.09))
		if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max))
		{
			for(j=0;j<( *(ph_dens+k) ); j++ )
			{
				//have to get random frequency for the photon comoving frequency
				// rejection sampling against the (normalized) Wien or BB curve
				y_dum=1; //initalize loop
				yfr_dum=0;
				while (y_dum>yfr_dum)
				{
					fr_dum=gsl_rng_uniform_pos(rand)*6.3e11*((hydro_data->temp)[i]); //in Hz
					//printf("%lf, %lf ",gsl_rng_uniform_pos(rand), (*(temps+i)));
					y_dum=gsl_rng_uniform_pos(rand);
					//printf("%lf ",fr_dum);
					if (spect=='w')
					{
						yfr_dum=(1.0/(1.29e31))*pow((fr_dum/((hydro_data->temp)[i])),3.0)/(exp((PL_CONST*fr_dum)/(K_B*((hydro_data->temp)[i]) ))-1); //curve is normalized to maximum
					}
					else
					{
						fr_max=(5.88e10)*((hydro_data->temp)[i]);//(C_LIGHT*(*(temps+i)))/(0.29); //max frequency of bb
						bb_norm=(PL_CONST*fr_max * pow((fr_max/C_LIGHT),2.0))/(exp(PL_CONST*fr_max/(K_B*((hydro_data->temp)[i])))-1); //find value of bb at fr_max
						yfr_dum=((1.0/bb_norm)*PL_CONST*fr_dum * pow((fr_dum/C_LIGHT),2.0))/(exp(PL_CONST*fr_dum/(K_B*((hydro_data->temp)[i])))-1); //curve is normalized to vaue of bb @ max frequency
					}
					//printf("%lf, %lf,%lf,%e \n",(*(temps+i)),fr_dum, y_dum, yfr_dum);
				}
				//printf("i: %d freq:%lf\n ",ph_tot, fr_dum);

				#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
					position_phi=gsl_rng_uniform(rand)*2*M_PI;
				#else
					position_phi=0;//dont need this in 3D
				#endif
				// isotropic direction in the comoving frame
				com_v_phi=gsl_rng_uniform(rand)*2*M_PI;
				com_v_theta=acos((gsl_rng_uniform(rand)*2)-1);
				//printf("%lf, %lf, %lf\n", position_phi, com_v_phi, com_v_theta);

				//populate 4 momentum comoving array
				*(p_comv+0)=PL_CONST*fr_dum/C_LIGHT;
				*(p_comv+1)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*cos(com_v_phi);
				*(p_comv+2)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*sin(com_v_phi);
				*(p_comv+3)=(PL_CONST*fr_dum/C_LIGHT)*cos(com_v_theta);

				//populate boost matrix, not sure why multiplying by -1, seems to give correct answer in old python code...
				#if DIMENSIONS == THREE
					hydroVectorToCartesian(boost, (hydro_data->v0)[i], (hydro_data->v1)[i], (hydro_data->v2)[i], (hydro_data->r0)[i], (hydro_data->r1)[i], (hydro_data->r2)[i]);
				#elif DIMENSIONS == TWO_POINT_FIVE
					hydroVectorToCartesian(boost, (hydro_data->v0)[i], (hydro_data->v1)[i], (hydro_data->v2)[i], (hydro_data->r0)[i], (hydro_data->r1)[i], position_phi);
				#else
					//this may have to change if PLUTO can save vectors in 3D when conidering 2D sim
					hydroVectorToCartesian(boost, (hydro_data->v0)[i], (hydro_data->v1)[i], 0, (hydro_data->r0)[i], (hydro_data->r1)[i], position_phi);
				#endif
				(*(boost+0))*=-1;
				(*(boost+1))*=-1;
				(*(boost+2))*=-1;

				//boost to lab frame
				lorentzBoost(boost, p_comv, l_boost, 'p', fPtr);
				//printf("Assignemnt: %e, %e, %e, %e\n", *(l_boost+0), *(l_boost+1), *(l_boost+2),*(l_boost+3));

				(*ph)[ph_tot].p0=(*(l_boost+0));
				(*ph)[ph_tot].p1=(*(l_boost+1));
				(*ph)[ph_tot].p2=(*(l_boost+2));
				(*ph)[ph_tot].p3=(*(l_boost+3));
				(*ph)[ph_tot].comv_p0=(*(p_comv+0));
				(*ph)[ph_tot].comv_p1=(*(p_comv+1));
				(*ph)[ph_tot].comv_p2=(*(p_comv+2));
				(*ph)[ph_tot].comv_p3=(*(p_comv+3));

				//place photons in rand positions within fluid element
				position_rand=gsl_rng_uniform_pos(rand)*((hydro_data->r0_size)[i])-0.5*((hydro_data->r0_size)[i]); //choose between -size/2 to size/2
				position2_rand=gsl_rng_uniform_pos(rand)*((hydro_data->r1_size)[i])-0.5*((hydro_data->r1_size)[i]);

				#if DIMENSIONS == THREE
					position3_rand=gsl_rng_uniform_pos(rand)*((hydro_data->r2_size)[i])-0.5*((hydro_data->r2_size)[i]);
					hydroCoordinateToMcratCoordinate(&cartesian_position_rand_array, (hydro_data->r0)[i]+position_rand, (hydro_data->r1)[i]+position2_rand, (hydro_data->r2)[i]+position3_rand);
				#else
					hydroCoordinateToMcratCoordinate(&cartesian_position_rand_array, (hydro_data->r0)[i]+position_rand, (hydro_data->r1)[i]+position2_rand, position_phi);
				#endif

				//assign random position
				(*ph)[ph_tot].r0=cartesian_position_rand_array[0];
				(*ph)[ph_tot].r1=cartesian_position_rand_array[1];
				(*ph)[ph_tot].r2=cartesian_position_rand_array[2];
				//fprintf(fPtr,"%d %e %e %e\n", ph_tot, (*ph)[ph_tot].r0, (*ph)[ph_tot].r1, (*ph)[ph_tot].r2);
				(*ph)[ph_tot].s0=1; //initalize stokes parameters as non polarized photon, stokes parameterized are normalized such that I always =1
				(*ph)[ph_tot].s1=0;
				(*ph)[ph_tot].s2=0;
				(*ph)[ph_tot].s3=0;
				(*ph)[ph_tot].num_scatt=0;
				(*ph)[ph_tot].weight=ph_weight_adjusted;
				(*ph)[ph_tot].nearest_block_index=0;
				(*ph)[ph_tot].type=INJECTED_PHOTON; //i for injected
				//printf("%d\n",ph_tot);
				ph_tot++;
			}
			k++;
		}
	}

	*ph_num=ph_tot; //save number of photons
	//printf(" %d: %d\n", *(ph_dens+(k-1)), *ph_num);
	free(ph_dens); free(p_comv);free(boost); free(l_boost);
	//exit(0);
}
// Apply a Lorentz boost (velocity 'boost', in units of c) to the 4-vector
// p_ph, writing the boosted components into result[0..3].
// 'object'=='p' (photon) additionally re-enforces the zero-norm condition
// via zeroNorm(); any other value skips that check.
void lorentzBoost(double *boost, double *p_ph, double *result, char object, FILE *fPtr)
{
	//function to perform lorentz boost
	//if doing boost for an electron last argument is 'e' and there wont be a check for zero norm
	//if doing boost for a photon last argument is 'p' and there will be a check for zero norm

	double beta=0, gamma=0, *boosted_p=NULL;

	gsl_vector_view b=gsl_vector_view_array(boost, 3); //make boost pointer into vector
	gsl_vector_view p=gsl_vector_view_array(p_ph, 4); //make boost pointer into vector
	gsl_matrix *lambda1= gsl_matrix_calloc (4, 4); //create matrix thats 4x4 to do lorentz boost
	gsl_vector *p_ph_prime =gsl_vector_calloc(4); //create vestor to hold lorentz boosted vector

	/*
	fprintf(fPtr,"Boost: %e, %e, %e, %e\n",gsl_blas_dnrm2(&b.vector), *(boost+0), *(boost+1), *(boost+2));
	fflush(fPtr);
	fprintf(fPtr,"4 Momentum to Boost: %e, %e, %e, %e\n",*(p_ph+0), *(p_ph+1), *(p_ph+2), *(p_ph+3));
	fflush(fPtr);
	*/

	//if magnitude of fluid velocity is != 0 do lorentz boost otherwise dont need to do a boost
	if (gsl_blas_dnrm2(&b.vector) > 0)
	{
		//fprintf(fPtr,"in If\n");
		//fflush(fPtr);

		beta=gsl_blas_dnrm2(&b.vector);
		gamma=1.0/sqrt(1-beta*beta);
		//fprintf(fPtr,"Beta: %e\tGamma: %e\n",beta,gamma );
		//fflush(fPtr);

		//initalize matrix values
		// standard symmetric boost matrix: time row/column carries -gamma*beta_i,
		// spatial block is delta_ij + (gamma-1) beta_i beta_j / beta^2
		gsl_matrix_set(lambda1, 0,0, gamma);
		gsl_matrix_set(lambda1, 0,1, -1*gsl_vector_get(&b.vector,0)*gamma);
		gsl_matrix_set(lambda1, 0,2, -1*gsl_vector_get(&b.vector,1)*gamma);
		gsl_matrix_set(lambda1, 0,3, -1*gsl_vector_get(&b.vector,2)*gamma);
		gsl_matrix_set(lambda1, 1,1, 1+((gamma-1)*(gsl_vector_get(&b.vector,0)*gsl_vector_get(&b.vector,0))/(beta*beta) ) );
		gsl_matrix_set(lambda1, 1,2, ((gamma-1)*(gsl_vector_get(&b.vector,0)* gsl_vector_get(&b.vector,1)/(beta*beta) ) ));
		gsl_matrix_set(lambda1, 1,3, ((gamma-1)*(gsl_vector_get(&b.vector,0)* gsl_vector_get(&b.vector,2)/(beta*beta) ) ));
		gsl_matrix_set(lambda1, 2,2, 1+((gamma-1)*(gsl_vector_get(&b.vector,1)*gsl_vector_get(&b.vector,1))/(beta*beta) ) );
		gsl_matrix_set(lambda1, 2,3, ((gamma-1)*(gsl_vector_get(&b.vector,1)* gsl_vector_get(&b.vector,2))/(beta*beta) ) );
		gsl_matrix_set(lambda1, 3,3, 1+((gamma-1)*(gsl_vector_get(&b.vector,2)*gsl_vector_get(&b.vector,2))/(beta*beta) ) );
		// fill in the symmetric lower triangle
		gsl_matrix_set(lambda1, 1,0, gsl_matrix_get(lambda1,0,1));
		gsl_matrix_set(lambda1, 2,0, gsl_matrix_get(lambda1,0,2));
		gsl_matrix_set(lambda1, 3,0, gsl_matrix_get(lambda1,0,3));
		gsl_matrix_set(lambda1, 2,1, gsl_matrix_get(lambda1,1,2));
		gsl_matrix_set(lambda1, 3,1, gsl_matrix_get(lambda1,1,3));
		gsl_matrix_set(lambda1, 3,2, gsl_matrix_get(lambda1,2,3));

		// p_ph_prime = lambda1 * p
		gsl_blas_dgemv(CblasNoTrans, 1, lambda1, &p.vector, 0, p_ph_prime );

		/*
		fprintf(fPtr,"Lorentz Boost Matrix 0: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 0,0), gsl_matrix_get(lambda1, 0,1), gsl_matrix_get(lambda1, 0,2), gsl_matrix_get(lambda1, 0,3));
		fflush(fPtr);
		fprintf(fPtr,"Lorentz Boost Matrix 1: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 1,0), gsl_matrix_get(lambda1, 1,1), gsl_matrix_get(lambda1, 1,2), gsl_matrix_get(lambda1, 1,3));
		fflush(fPtr);
		fprintf(fPtr,"Lorentz Boost Matrix 2: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 2,0), gsl_matrix_get(lambda1, 2,1), gsl_matrix_get(lambda1, 2,2), gsl_matrix_get(lambda1, 2,3));
		fflush(fPtr);
		fprintf(fPtr,"Lorentz Boost Matrix 3: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 3,0), gsl_matrix_get(lambda1, 3,1), gsl_matrix_get(lambda1, 3,2), gsl_matrix_get(lambda1, 3,3));
		fflush(fPtr);
		fprintf(fPtr,"Before Check: %e %e %e %e\n ",gsl_vector_get(p_ph_prime, 0), gsl_vector_get(p_ph_prime, 1), gsl_vector_get(p_ph_prime, 2), gsl_vector_get(p_ph_prime, 3));
		fflush(fPtr);
		*/

		//double check vector for 0 norm condition if photon
		if (object == 'p')
		{
			//fprintf(fPtr,"In if\n");
			// zeroNorm modifies p_ph_prime's storage in place and returns a
			// pointer into it; the values are copied out below before p_ph_prime
			// is freed
			boosted_p=zeroNorm(gsl_vector_ptr(p_ph_prime, 0));
		}
		else
		{
			boosted_p=gsl_vector_ptr(p_ph_prime, 0);
		}

		/*
		fprintf(fPtr,"After Check: %e %e %e %e\n ", *(boosted_p+0),*(boosted_p+1),*(boosted_p+2),*(boosted_p+3) );
		fflush(fPtr);
		* */
	}
	else
	{
		/*
		fprintf(fPtr,"in else");
		fflush(fPtr);
		* */

		//double check vector for 0 norm condition
		if (object=='p')
		{
			// note: this mutates the caller's p_ph in place
			boosted_p=zeroNorm(p_ph);
		}
		else
		{
			//if 4 momentum isnt for photon and there is no boost to be done, we dont care about normality and just want back what was passed to lorentz boost
			boosted_p=gsl_vector_ptr(&p.vector, 0);
		}
	}

	//assign values to result
	*(result+0)=*(boosted_p+0);
	*(result+1)=*(boosted_p+1);
	*(result+2)=*(boosted_p+2);
	*(result+3)=*(boosted_p+3);

	//free up memory
	//free(boosted_p);
	gsl_matrix_free (lambda1); gsl_vector_free(p_ph_prime);
}
// Enforce the zero-norm (null) condition of a photon 4-momentum in place:
// rescale the spatial components so their Euclidean norm equals p_ph[0]
// (the energy component is taken as correct). Returns the (mutated) input
// pointer. NOTE(review): the exact '!=' comparison of doubles means the
// rescale runs on essentially every call — presumably intentional.
double *zeroNorm(double *p_ph)
{
	//ensures zero norm condition of photon 4 monetum is held
	int i=0;
	double normalizing_factor=0;
	gsl_vector_view p=gsl_vector_view_array((p_ph+1), 3); //make last 3 elements of p_ph pointer into vector

	if (*(p_ph+0) != gsl_blas_dnrm2(&p.vector ) )
	{
		normalizing_factor=(gsl_blas_dnrm2(&p.vector ));
		//fprintf(fPtr,"in zero norm if\n");
		//fflush(fPtr);

		//go through and correct 4 momentum assuming the energy is correct
		*(p_ph+1)= ((*(p_ph+1))/(normalizing_factor))*(*(p_ph+0));
		*(p_ph+2)= ((*(p_ph+2))/(normalizing_factor))*(*(p_ph+0));
		*(p_ph+3)= ((*(p_ph+3))/(normalizing_factor))*(*(p_ph+0));
	}

	/*
	if (pow((*(p_ph+0)),2) != ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) )
	{
		printf("This isnt normalized in the function\nThe difference is: %e\n", pow((*(p_ph+0)),2) - ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) );
	}
	*/ //normalized within a factor of 10^-53
	return p_ph;
}
int findNearestPropertiesAndMinMFP( struct photon *ph, int num_ph, double *all_time_steps, int *sorted_indexes, struct hydro_dataframe *hydro_data, gsl_rng * rand, int find_nearest_block_switch, FILE *fPtr)
{
int i=0, min_index=0, ph_block_index=0, num_thread=1, thread_id=0;
double ph_x=0, ph_y=0, ph_phi=0, ph_z=0, ph_r=0, ph_theta=0;
double fl_v_x=0, fl_v_y=0, fl_v_z=0; //to hold the fluid velocity in MCRaT coordinates
double ph_v_norm=0, fl_v_norm=0, synch_x_sect=0;
double n_cosangle=0, n_dens_lab_tmp=0,n_vx_tmp=0, n_vy_tmp=0, n_vz_tmp=0, n_temp_tmp=0 ;
double rnd_tracker=0, n_dens_min=0, n_vx_min=0, n_vy_min=0, n_vz_min=0, n_temp_min=0;
#if defined(_OPENMP)
num_thread=omp_get_num_threads(); //default is one above if theres no openmp usage
#endif
bool is_in_block=0; //boolean to determine if the photon is outside of its previously noted block
int index=0, num_photons_find_new_element=0;
double mfp=0,min_mfp=0, beta=0;
double el_p[4];
double ph_p_comv[4], ph_p[4], fluid_beta[3], photon_hydro_coord[3];
//initialize gsl random number generator fo each thread
const gsl_rng_type *rng_t;
gsl_rng **rng;
gsl_rng_env_setup();
rng_t = gsl_rng_ranlxs0;
rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *));
rng[0]=rand;
//#pragma omp parallel for num_threads(nt)
for(i=1;i<num_thread;i++)
{
rng[i] = gsl_rng_alloc (rng_t);
gsl_rng_set(rng[i],gsl_rng_get(rand));
}
//go through each photon and find the blocks around it and then get the distances to all of those blocks and choose the one thats the shortest distance away
//can optimize here, exchange the for loops and change condition to compare to each of the photons is the radius of the block is .95 (or 1.05) times the min (max) photon radius
//or just parallelize this part here
min_mfp=1e12;
#pragma omp parallel for num_threads(num_thread) firstprivate( is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, ph_r, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker, ph_p_comv, el_p, ph_p, fluid_beta) private(i) shared(min_mfp ) reduction(+:num_photons_find_new_element)
for (i=0;i<num_ph; i++)
{
//fprintf(fPtr, "%d, %d,%e\n", i, ((ph+i)->nearest_block_index), ((ph+i)->weight));
//fflush(fPtr);
if (find_nearest_block_switch==0)
{
ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault here
}
else
{
ph_block_index=0; // therefore if starting a new frame set index=0 to avoid this issue
}
mcratCoordinateToHydroCoordinate(&photon_hydro_coord, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2);//convert the photons coordinate to the hydro sim coordinate system
//printf("ph_x:%e, ph_y:%e\n", ph_x, ph_y);
//if the location of the photon is inside the domain of the hydro simulation then do all of this, otherwise assign huge mfp value so no scattering occurs and the next frame is loaded
// absorbed photons have ph_block_index=-1, therefore if this value is not less than 0, calulate the mfp properly but doesnt work when go to new frame and find new indexes (will change b/c will get rid of these photons when printing)
//alternatively make decision based on 0 weight
#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
if (((photon_hydro_coord[1]<(hydro_data->r1_domain)[1]) &&
(photon_hydro_coord[1]>(hydro_data->r1_domain)[0]) &&
(photon_hydro_coord[0]<(hydro_data->r0_domain)[1]) &&
(photon_hydro_coord[0]>(hydro_data->r0_domain)[0])) && ((ph+i)->nearest_block_index != -1) ) //can use sorted index to see which photons have been absorbed efficiently before printing and get the indexes
#else
if (((photon_hydro_coord[2]<(hydro_data->r2_domain)[1]) &&
(photon_hydro_coord[2]>(hydro_data->r2_domain)[0]) &&
(photon_hydro_coord[1]<(hydro_data->r1_domain)[1]) &&
(photon_hydro_coord[1]>(hydro_data->r1_domain)[0]) &&
(photon_hydro_coord[0]<(hydro_data->r0_domain)[1]) &&
(photon_hydro_coord[0]>(hydro_data->r0_domain)[0])) && ((ph+i)->nearest_block_index != -1) )
#endif
{
is_in_block=checkInBlock(photon_hydro_coord[0], photon_hydro_coord[1], photon_hydro_coord[2], hydro_data, ph_block_index);
//when rebinning photons can have comoving 4 momenta=0 and nearest_block_index=0 (and block 0 be the actual block the photon is in making it not refind the proper index and reclaulate the comoving 4 momenta) which can make counting synch scattered photons be thrown off, thus take care of this case by forcing the function to recalc things
#if CYCLOSYNCHROTRON_SWITCH == ON
if ((ph_block_index==0) && ( ((ph+i)->comv_p0)+((ph+i)->comv_p1)+((ph+i)->comv_p2)+((ph+i)->comv_p3) == 0 ) )
{
is_in_block=0; //say that photon is not in the block, force it to recompute things
}
#endif
if (find_nearest_block_switch==0 && is_in_block)
{
//keep the saved grid index
min_index=ph_block_index;
}
else
{
//find the new index of the block closest to the photon
//min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh
//find the new index of the block that the photon is actually in
min_index=findContainingBlock(photon_hydro_coord[0], photon_hydro_coord[1], photon_hydro_coord[2], hydro_data, fPtr); //(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr);
if (min_index != -1)
{
(ph+i)->nearest_block_index=min_index; //save the index if min_index != -1
//also recalculate the photons' comoving frequency in this new fluid element
ph_p[0]=((ph+i)->p0);
ph_p[1]=((ph+i)->p1);
ph_p[2]=((ph+i)->p2);
ph_p[3]=((ph+i)->p3);
#if DIMENSIONS == THREE
hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], (hydro_data->r2)[min_index]);
#elif DIMENSIONS == TWO_POINT_FIVE
ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
#else
ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
//this may have to change if PLUTO can save vectors in 3D when conidering 2D sim
hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], 0, (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
#endif
lorentzBoost(&fluid_beta, &ph_p, &ph_p_comv, 'p', fPtr);
((ph+i)->comv_p0)=ph_p_comv[0];
((ph+i)->comv_p1)=ph_p_comv[1];
((ph+i)->comv_p2)=ph_p_comv[2];
((ph+i)->comv_p3)=ph_p_comv[3];
num_photons_find_new_element+=1;
}
else
{
fprintf(fPtr, "Photon number %d FLASH index not found, making sure it doesnt scatter.\n", i);
}
}
//if min_index!= -1 (know which fluid element photon is in) do all this stuff, otherwise make sure photon doesnt scatter
if (min_index != -1)
{
//fprintf(fPtr,"Min Index: %d\n", min_index);
//save values
(n_dens_lab_tmp)= (hydro_data->dens_lab)[min_index];//(*(dens_lab+min_index));
(n_temp_tmp)= (hydro_data->temp)[min_index];//(*(temp+min_index));
#if DIMENSIONS == THREE
hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], (hydro_data->r2)[min_index]);
#elif DIMENSIONS == TWO_POINT_FIVE
ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
#else
ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
//this may have to change if PLUTO can save vectors in 3D when conidering 2D sim
hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], 0, (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
#endif
fl_v_x=fluid_beta[0];
fl_v_y=fluid_beta[1];
fl_v_z=fluid_beta[2];
fl_v_norm=sqrt(fl_v_x*fl_v_x+fl_v_y*fl_v_y+fl_v_z*fl_v_z);
ph_v_norm=sqrt(((ph+i)->p1)*((ph+i)->p1)+((ph+i)->p2)*((ph+i)->p2)+((ph+i)->p3)*((ph+i)->p3));
//(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product
n_cosangle=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined
beta=sqrt(1.0-1.0/((hydro_data->gamma)[min_index]*(hydro_data->gamma)[min_index]));
//put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case
rnd_tracker=0;
#if defined(_OPENMP)
thread_id=omp_get_thread_num();
#endif
rnd_tracker=gsl_rng_uniform_pos(rng[thread_id]);
//printf("Rnd_tracker: %e Thread number %d \n",rnd_tracker, omp_get_thread_num() );
//mfp=(-1)*log(rnd_tracker)*(M_P/((n_dens_tmp))/(THOM_X_SECT)); ///(1.0-beta*((n_cosangle)))) ; // the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths IN COMOV FRAME for reference
mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOM_X_SECT/(1.0-beta*n_cosangle))*log(rnd_tracker) ;
}
else
{
mfp=min_mfp;
}
}
else
{
mfp=min_mfp;
//fprintf(fPtr,"Photon %d In ELSE\n", i);
//exit(0);
}
*(all_time_steps+i)=mfp/C_LIGHT;
//fprintf(fPtr,"Photon %d has time %e\n", i, *(all_time_steps+i));
//fflush(fPtr);
}
//exit(0);
//free rand number generator
for (i=1;i<num_thread;i++)
{
gsl_rng_free(rng[i]);
}
free(rng);
//printf("HERE\n");
for (i=0;i<num_ph;i++)
{
*(sorted_indexes+i)= i; //save indexes to array to use in qsort
}
//printf("before QSORT\n");
#if (defined _GNU_SOURCE || defined __GNU__ || defined __linux__)
qsort_r(sorted_indexes, num_ph, sizeof (int), compare2, all_time_steps);
#elif (defined __APPLE__ || defined __MACH__ || defined __DARWIN__ || defined __FREEBSD__ || defined __BSD__ || defined OpenBSD3_1 || defined OpenBSD3_9)
qsort_r(sorted_indexes, num_ph, sizeof (int), all_time_steps, compare);
#else
#error Cannot detect operating system
#endif
//print number of times we had to refind the index of the elemtn photons were located in
if (find_nearest_block_switch!=0)
{
num_photons_find_new_element=0; //force this to be 0 since we forced MCRaT to find the indexes for all the photons here
}
return num_photons_find_new_element;
}
//comparator for the BSD-style qsort_r (context pointer FIRST): orders the two int
//indexes pointed to by a and b according to the double values they select from the
//array passed through the context pointer ar (here: the per-photon time steps).
//returns -1/0/1; the branchless comparison form avoids the overflow that a plain
//subtraction-based comparator could hit and works correctly for doubles.
//adapted from https://phoxis.org/2012/07/12/get-sorted-index-orderting-of-an-array/
int compare (void *ar, const void *a, const void *b)
{
    int aa = *(const int *) a;
    int bb = *(const int *) b;
    const double *arr = ar;
    return ((arr[aa] > arr[bb]) - (arr[aa] < arr[bb]));
}
//comparator for the GNU-style qsort_r (context pointer LAST); two comparators are
//needed because glibc and BSD disagree on the qsort_r argument order.
//orders the int indexes *a and *b by the double values they select from the array
//passed via the context pointer ar.
//adapted from https://phoxis.org/2012/07/12/get-sorted-index-orderting-of-an-array/
int compare2 ( const void *a, const void *b, void *ar)
{
    const double *vals = ar;
    int lhs = *(const int *) a;
    int rhs = *(const int *) b;
    if (vals[lhs] < vals[rhs])
    {
        return -1;
    }
    if (vals[lhs] > vals[rhs])
    {
        return 1;
    }
    return 0;
}
//DEPRECATED STUB: 2D-only volume-weighted interpolation of fluid properties around each
//photon and selection of the photon with the global minimum mean free path.
//This routine is no longer called by MCRaT; its ~230-line commented-out legacy
//implementation (FLASH/RIKEN specific, OpenMP-parallel) has been removed as dead code
//and remains available in version control history if it is ever needed again.
//The signature is kept intact for ABI/link compatibility with any legacy callers.
//always returns 0 (the legacy return value was the index of the min-MFP photon).
int interpolatePropertiesAndMinMFP( struct photon *ph, int num_ph, int array_num, double *time_step, double *x, double *y, double *z, double *szx, double *szy, double *velx, double *vely, double *velz, double *dens_lab, double *temp, double *n_dens_lab, double *n_vx, double *n_vy, double *n_vz, double *n_temp, gsl_rng * rand, int find_nearest_block_switch, FILE *fPtr)
{
    return 0;
}
//advance every active photon along a straight line at the speed of light for a
//lab-frame time interval t (displacement per axis is (p_i/p0)*c*t).
//photons in the cyclosynchrotron pool (type CS_POOL_PHOTON) and photons with zero
//weight (null/absorbed) are left untouched.
//fPtr is retained for diagnostic printing compatibility but is currently unused.
void updatePhotonPosition(struct photon *ph, int num_ph, double t, FILE *fPtr)
{
    int i=0;
    #if defined(_OPENMP)
    //omp_get_num_threads() returns 1 outside of a parallel region, which silently
    //disabled the threading here; omp_get_max_threads() gives the intended team size
    int num_thread=omp_get_max_threads();
    #endif
    double divide_p0=0;
    #pragma omp parallel for num_threads(num_thread) firstprivate(divide_p0)
    for (i=0;i<num_ph;i++)
    {
        if (((ph+i)->type != CS_POOL_PHOTON) && ((ph+i)->weight != 0))
        {
            //multiply by the reciprocal of p0 once instead of dividing three times
            divide_p0=1.0/((ph+i)->p0);
            ((ph+i)->r0)+=((ph+i)->p1)*divide_p0*C_LIGHT*t; //update x position
            ((ph+i)->r1)+=((ph+i)->p2)*divide_p0*C_LIGHT*t; //update y
            ((ph+i)->r2)+=((ph+i)->p3)*divide_p0*C_LIGHT*t; //update z
        }
    }
}
//performs a single photon event (scattering) for this frame: photons are visited in order of
//increasing time-to-event (via sorted_indexes), all photons are propagated forward to each
//candidate event time, and the loop stops at the first event that actually occurs or as soon
//as the next candidate time exceeds dt_max. returns the elapsed time to the event.
//NOTE(review): frame_abs_cnt is accepted but not modified in this visible body — presumably
//absorption counting happens elsewhere; confirm against callers.
double photonEvent(struct photon *ph, int num_ph, double dt_max, double *all_time_steps, int *sorted_indexes, struct hydro_dataframe *hydro_data, int *scattered_ph_index, int *frame_scatt_cnt, int *frame_abs_cnt, gsl_rng * rand, FILE *fPtr)//(struct photon *ph, int num_ph, double dt_max, double *all_time_steps, int *sorted_indexes, double *all_flash_vx, double *all_flash_vy, double *all_flash_vz, double *all_fluid_temp, int *scattered_ph_index, int *frame_scatt_cnt, int *frame_abs_cnt, gsl_rng * rand, FILE *fPtr)
{
//function to perform single photon scattering
int i=0, index=0, ph_index=0, event_did_occur=0; //variable event_did_occur is to keep track of wether a scattering or absorption actually occured or not,
double scatt_time=0, old_scatt_time=0; //keep track of new time to scatter vs old time to scatter to know how much to incrementally propagate the photons if necessary
double phi=0, theta=0; //phi and theta for the 4 momentum
double ph_phi=0, flash_vx=0, flash_vy=0, flash_vz=0, fluid_temp=0;
//NOTE(review): the following small buffers are heap-allocated and freed at the end; malloc
//results are not checked — TODO confirm the project's allocation-failure policy
double *ph_p=malloc(4*sizeof(double)); //pointer to hold only photon 4 momentum @ start
double *el_p_comov=malloc(4*sizeof(double));//pointer to hold the electron 4 momenta in comoving frame
double *ph_p_comov=malloc(4*sizeof(double));//pointer to hold the comoving photon 4 momenta
double *fluid_beta=malloc(3*sizeof(double));//pointer to hold fluid velocity vector
double *negative_fluid_beta=malloc(3*sizeof(double));//pointer to hold negative fluid velocity vector
double *s=malloc(4*sizeof(double)); //vector to hold the stokes parameters for a given photon
i=0;
old_scatt_time=0;
event_did_occur=0;
//fprintf(fPtr,"In this function Num_ph %d\n", num_ph);
//fflush(fPtr);
//walk photons in order of increasing candidate event time until one event occurs
while (i<num_ph && event_did_occur==0 )
{
ph_index=(*(sorted_indexes+i));
scatt_time= *(all_time_steps+ph_index); //get the time until the photon scatters
//IF THE TIME IS GREATER THAN dt_max dont let the photons positions be updated
if (scatt_time<dt_max)
{
//propagate every photon by the increment since the previous candidate event time
updatePhotonPosition(ph, num_ph, scatt_time-old_scatt_time, fPtr);
//fprintf(fPtr,"i: %d, Photon: %d, Delta t=%e\n", i, ph_index, scatt_time-old_scatt_time);
//fflush(fPtr);
//WHAT IF THE PHOTON MOVES TO A NEW BLOCK BETWEEN WHEN WE CALC MFP AND MOVE IT TO DO THE SCATTERING????
//it mostly happens at low optical depth, near the photosphere so we would have a large mfp anyways so we probably wouldn't be in this function in that case
index=(ph+ph_index)->nearest_block_index; //the sorted_indexes gives index of photon with smallest time to potentially scatter then extract the index of the block closest to that photon
fluid_temp=(hydro_data->temp)[index];
//if (strcmp(DIM_SWITCH, dim_3d_str)==0)
ph_phi=atan2(((ph+ph_index)->r1), (((ph+ph_index)->r0)));
/*
if (isnan((ph+ph_index)->r0) || isnan((ph+ph_index)->r1) || isnan((ph+ph_index)->r2))
{
printf("Not a number\n");
}
fprintf(fPtr,"ph_phi=%e\n", ph_phi);
fflush(fPtr);
*/
//convert flash coordinated into MCRaT coordinates
//printf("Getting fluid_beta\n");
//build the Cartesian fluid velocity vector; the vector components passed depend on the
//dimensionality of the hydro simulation (see hydroVectorToCartesian)
#if DIMENSIONS == THREE
hydroVectorToCartesian(fluid_beta, (hydro_data->v0)[index], (hydro_data->v1)[index], (hydro_data->v2)[index], (hydro_data->r0)[index], (hydro_data->r1)[index], (hydro_data->r2)[index]);
#elif DIMENSIONS == TWO_POINT_FIVE
hydroVectorToCartesian(fluid_beta, (hydro_data->v0)[index], (hydro_data->v1)[index], (hydro_data->v2)[index], (hydro_data->r0)[index], (hydro_data->r1)[index], ph_phi);
#else
//this may have to change if PLUTO can save vectors in 3D when conidering 2D sim
hydroVectorToCartesian(fluid_beta, (hydro_data->v0)[index], (hydro_data->v1)[index], 0, (hydro_data->r0)[index], (hydro_data->r1)[index], ph_phi);
#endif
/*
fprintf(fPtr,"FLASH v: %e, %e\n", flash_vx,flash_vy);
fflush(fPtr);
*/
//fill in photon 4 momentum
*(ph_p+0)=((ph+ph_index)->p0);
*(ph_p+1)=((ph+ph_index)->p1);
*(ph_p+2)=((ph+ph_index)->p2);
*(ph_p+3)=((ph+ph_index)->p3);
//first we bring the photon to the fluid's comoving frame
//already have comoving 4 momentum
*(ph_p_comov+0)=((ph+ph_index)->comv_p0);
*(ph_p_comov+1)=((ph+ph_index)->comv_p1);
*(ph_p_comov+2)=((ph+ph_index)->comv_p2);
*(ph_p_comov+3)=((ph+ph_index)->comv_p3);
//fill in stokes parameters
*(s+0)=((ph+ph_index)->s0); //I ==1
*(s+1)=((ph+ph_index)->s1); //Q/I
*(s+2)=((ph+ph_index)->s2); //U/I
*(s+3)=((ph+ph_index)->s3); //V/I
/*
if (((ph+ph_index)->type) == COMPTONIZED_PHOTON)
{
fprintf(fPtr,"Unscattered Photon in Lab frame: %e, %e, %e,%e\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3), (ph->r0), (ph->r1), (ph->r2), *(s+0), *(s+1), *(s+2), *(s+3));
fflush(fPtr);
fprintf(fPtr,"Fluid Beta: %e, %e, %e\n", *(fluid_beta+0),*(fluid_beta+1), *(fluid_beta+2));
fflush(fPtr);
}
fprintf(fPtr,"Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3);
fflush(fPtr);
if (((ph+ph_index)->type) == COMPTONIZED_PHOTON)
{
fprintf(fPtr, "Before Scattering, In Comov_frame:\n");
fflush(fPtr);
fprintf(fPtr, "ph_comov: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));
fflush(fPtr);
}
*/
//then rotate the stokes plane by some angle such that we are in the stokes coordinat eystsem after the lorentz boost
#if STOKES_SWITCH == ON
{
stokesRotation(fluid_beta, (ph_p+1), (ph_p_comov+1), s, fPtr);
}
#endif
//exit(0);
//second we generate a thermal electron at the correct temperature
singleElectron(el_p_comov, fluid_temp, ph_p_comov, rand, fPtr);
/*
if (((ph+ph_index)->type) == COMPTONIZED_PHOTON)
{
fprintf(fPtr,"el_comov: %e, %e, %e,%e\n", *(el_p_comov+0), *(el_p_comov+1), *(el_p_comov+2), *(el_p_comov+3));
fflush(fPtr);
}
*/
//third we perform the scattering and save scattered photon 4 monetum in ph_p_comov @ end of function
event_did_occur=singleScatter(el_p_comov, ph_p_comov, s, rand, fPtr);
/*
if (((ph+ph_index)->type) == COMPTONIZED_PHOTON)
{
fprintf(fPtr,"After Scattering, After Lorentz Boost to Comov frame: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));
fflush(fPtr);
}
*/
if (event_did_occur==1)
{
//fprintf(fPtr,"Within the if!\n");
//fflush(fPtr);
//if the scattering occured have to uodate the phtoon 4 momentum. if photon didnt scatter nothing changes
//fourth we bring the photon back to the lab frame
*(negative_fluid_beta+0)=-1*( *(fluid_beta+0));
*(negative_fluid_beta+1)=-1*( *(fluid_beta+1));
*(negative_fluid_beta+2)=-1*( *(fluid_beta+2));
lorentzBoost(negative_fluid_beta, ph_p_comov, ph_p, 'p', fPtr);
/*
if (((ph+ph_index)->type) == COMPTONIZED_PHOTON)
{
fprintf(fPtr,"Scattered Photon in Lab frame: %e, %e, %e,%e\n\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3));
fflush(fPtr);
}
*/
#if STOKES_SWITCH == ON
{
stokesRotation(negative_fluid_beta, (ph_p_comov+1), (ph_p+1), s, fPtr); //rotate to boost back to lab frame
//save stokes parameters
((ph+ph_index)->s0)= *(s+0); //I ==1
((ph+ph_index)->s1)= *(s+1);
((ph+ph_index)->s2)= *(s+2);
((ph+ph_index)->s3)= *(s+3);
}
#endif
//sanity check: 1.6e-9 erg/keV converts p0*c to keV; flag photons above 10 MeV
if (((*(ph_p+0))*C_LIGHT/1.6e-9) > 1e4)
{
//energy greater than 1e4 keV
fprintf(fPtr,"Extremely High Photon Energy!!!!!!!!\n");
fflush(fPtr);
}
//fprintf(fPtr,"Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3);
//fprintf(fPtr, "Old: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));
//assign the photon its new lab 4 momentum
((ph+ph_index)->p0)=(*(ph_p+0));
((ph+ph_index)->p1)=(*(ph_p+1));
((ph+ph_index)->p2)=(*(ph_p+2));
((ph+ph_index)->p3)=(*(ph_p+3));
//assign it the comoving frame 4 momentum
((ph+ph_index)->comv_p0)=(*(ph_p_comov+0));
((ph+ph_index)->comv_p1)=(*(ph_p_comov+1));
((ph+ph_index)->comv_p2)=(*(ph_p_comov+2));
((ph+ph_index)->comv_p3)=(*(ph_p_comov+3));
//printf("Done assigning values to original struct\n");
//incremement that photons number of scatterings
((ph+ph_index)->num_scatt)+=1;
*frame_scatt_cnt+=1; //incrememnt total number of scatterings
}
}
else
{
// if the photon scatt_time > dt_max
//have to adjust the time properly so that the time si now appropriate for the next frame
scatt_time=dt_max;
updatePhotonPosition(ph, num_ph, scatt_time-old_scatt_time, fPtr);
event_did_occur=1; //set equal to 1 to get out of the loop b/c other subsequent photons will have scatt_time > dt_max
}
old_scatt_time=scatt_time;
i++;
}
//exit(0);
*scattered_ph_index=ph_index; //save the index of the photon that was scattered
//fprintf(fPtr,"scattered_ph_index: %d %d\n", *scattered_ph_index, (*(sorted_indexes+i-1)));
//fflush(fPtr);
free(el_p_comov);
free(ph_p_comov);
free(fluid_beta);
free(negative_fluid_beta);
free(ph_p);
free(s);
ph_p=NULL;negative_fluid_beta=NULL;ph_p_comov=NULL; el_p_comov=NULL;
//retrun total time elapsed to scatter a photon
return scatt_time;
}
//draws a single thermal electron 4-momentum (stored in el_p as E/c then three spatial
//components) at comoving temperature temp [K], with its direction chosen relative to the
//incoming photon 4-momentum ph_p and then rotated into the photon's frame of reference.
//hot electrons (temp >= 1e7 K) are sampled from the relativistic Maxwell-Juttner
//distribution by rejection; cooler electrons from three Gaussian velocity components.
void singleElectron(double *el_p, double temp, double *ph_p, gsl_rng * rand, FILE *fPtr)
{
//generates an electron with random energy
double factor=0, gamma=0;
double y_dum=0, f_x_dum=0, x_dum=0, beta_x_dum=0, beta=0, phi=0, theta=0, ph_theta=0, ph_phi=0;
gsl_matrix *rot= gsl_matrix_calloc (3, 3); //create matrix thats 3x3 to do rotation
gsl_vector_view el_p_prime ; //create vector to hold rotated electron 4 momentum
gsl_vector *result=gsl_vector_alloc (3);
//fprintf(fPtr, "Temp in singleElectron: %e\n", temp);
if (temp>= 1e7)
{
//printf("In if\n");
//relativistic regime: rejection-sample gamma from the Maxwell-Juttner distribution,
//factor = kT/(m_e c^2) is the dimensionless temperature
factor=K_B*temp/(M_EL*C_LIGHT*C_LIGHT);
y_dum=1; //initalize loop to get a random gamma from the distribution of electron velocities
f_x_dum=0;
//NOTE(review): the isnan guard also retries when the Bessel normalization under/overflows
while ((isnan(f_x_dum) !=0) || (y_dum>f_x_dum) )
{
x_dum=gsl_rng_uniform_pos(rand)*(1+100*factor);
beta_x_dum=sqrt(1-(1/(x_dum*x_dum)));
y_dum=gsl_rng_uniform(rand)/2.0;
f_x_dum=x_dum*x_dum*(beta_x_dum/gsl_sf_bessel_Kn (2, 1.0/factor))*exp(-1*x_dum/factor); //
//fprintf(fPtr,"Choosing a Gamma: xdum: %e, f_x_dum: %e, y_dum: %e\n", x_dum, f_x_dum, y_dum);
}
gamma=x_dum;
}
else
{
//printf("In else\n");
//non-relativistic regime: three independent Gaussian velocity components give a Maxwellian speed distribution
factor=sqrt(K_B*temp/M_EL);
//calculate a random gamma from 3 random velocities drawn from a gaussian distribution with std deviation of "factor"
gamma=1.0/sqrt( 1- (pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+ pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2) )); //each vel direction is normal distribution -> maxwellian when multiplied
}
//fprintf(fPtr,"Chosen Gamma: %e\n",gamma);
beta=sqrt( 1- (1/(gamma*gamma)) );
//printf("Beta is: %e in singleElectron\n", beta);
phi=gsl_rng_uniform(rand)*2*M_PI;
//rejection-sample the polar angle from sin(theta)*(1 - beta*cos(theta)), which weights
//directions by the relative velocity between electron and photon (head-on favored)
y_dum=1; //initalize loop to get a random theta
f_x_dum=0;
while (y_dum>f_x_dum)
{
y_dum=gsl_rng_uniform(rand)*1.3;
x_dum=gsl_rng_uniform(rand)*M_PI;
f_x_dum=sin(x_dum)*(1-(beta*cos(x_dum)));
}
theta=x_dum;
//fprintf(fPtr,"Beta: %e\tPhi: %e\tTheta: %e\n",beta,phi, theta);
//fill in electron 4 momentum NOT SURE WHY THE ORDER IS AS SUCH SEEMS TO BE E/c, pz,py,px!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
*(el_p+0)=gamma*(M_EL)*(C_LIGHT);
*(el_p+1)=gamma*(M_EL)*(C_LIGHT)*beta*cos(theta);
*(el_p+2)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*sin(phi);
*(el_p+3)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*cos(phi);
//printf("Old: %e, %e, %e,%e\n", *(el_p+0), *(el_p+1), *(el_p+2), *(el_p+3));
el_p_prime=gsl_vector_view_array((el_p+1), 3);
//find angles of photon NOT SURE WHY WERE CHANGING REFERENCE FRAMES HERE???!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ph_phi=atan2(*(ph_p+2), *(ph_p+3)); //Double Check
ph_theta=atan2(sqrt( pow(*(ph_p+2),2)+ pow(*(ph_p+3),2)) , (*(ph_p+1)) );
//printf("Calculated Photon phi and theta in singleElectron:%e, %e\n", ph_phi, ph_theta);
//fill in rotation matrix to rotate around x axis to get rid of phi angle
gsl_matrix_set(rot, 1,1,1);
gsl_matrix_set(rot, 2,2,cos(ph_theta));
gsl_matrix_set(rot, 0,0,cos(ph_theta));
gsl_matrix_set(rot, 0,2,-sin(ph_theta));
gsl_matrix_set(rot, 2,0,sin(ph_theta));
gsl_blas_dgemv(CblasNoTrans, 1, rot, &el_p_prime.vector, 0, result);
/*
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2));
printf("Middle: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(result,0), gsl_vector_get(result,1), gsl_vector_get(result,2));
*/
//second rotation: undo the photon's azimuthal angle; the result overwrites the spatial
//components of el_p in place through the vector view
gsl_matrix_set_all(rot,0);
gsl_matrix_set(rot, 0,0,1);
gsl_matrix_set(rot, 1,1,cos(-ph_phi));
gsl_matrix_set(rot, 2,2,cos(-ph_phi));
gsl_matrix_set(rot, 1,2,-sin(-ph_phi));
gsl_matrix_set(rot, 2,1,sin(-ph_phi));
gsl_blas_dgemv(CblasNoTrans, 1, rot, result, 0, &el_p_prime.vector);
/*
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2));
printf("Final EL_P_vec: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(&el_p_prime.vector,0), gsl_vector_get(&el_p_prime.vector,1), gsl_vector_get(&el_p_prime.vector,2));
*/
gsl_matrix_free (rot);gsl_vector_free(result);
}
//computes the weight-averaged photon energy in ergs: sum(w*p0)*c / sum(w).
//when CYCLOSYNCHROTRON_SWITCH is ON, zero-weight photons (null/absorbed) are excluded.
//returns 0 when there are no contributing photons, instead of dividing by zero.
double averagePhotonEnergy(struct photon *ph, int num_ph)
{
    int i=0;
    double e_sum=0, w_sum=0;
    #pragma omp parallel for reduction(+:e_sum) reduction(+:w_sum)
    for (i=0;i<num_ph;i++)
    {
        #if CYCLOSYNCHROTRON_SWITCH == ON
        if (((ph+i)->weight != 0)) //dont want account for null or absorbed UNABSORBED_CS_PHOTON photons
        #endif
        {
            e_sum+=(((ph+i)->p0)*((ph+i)->weight));
            w_sum+=((ph+i)->weight);
        }
    }
    //guard against num_ph==0 or all weights being zero, which previously produced NaN/inf
    if (w_sum == 0)
    {
        return 0;
    }
    return (e_sum*C_LIGHT)/w_sum;
}
//gathers per-frame scattering statistics over all photons: the max/min/average number of
//scatterings and the average radial position, plus (logged to fPtr) the average radius
//broken down by photon type (injected, comptonized/unabsorbed-CS, synchrotron pool).
//outputs are written through max, min, avg and r_avg.
void phScattStats(struct photon *ph, int ph_num, int *max, int *min, double *avg, double *r_avg, FILE *fPtr )
{
    int temp_max=0, temp_min=INT_MAX, i=0, count=0, count_synch=0, count_comp=0, count_i=0;
    #if defined(_OPENMP)
    //omp_get_num_threads() returns 1 outside a parallel region; use the max team size instead
    int num_thread=omp_get_max_threads();
    #endif
    double sum=0, avg_r_sum=0, avg_r_sum_synch=0, avg_r_sum_comp=0, avg_r_sum_inject=0;
    //the per-type counters and radius accumulators were previously updated inside the parallel
    //loop without appearing in any reduction clause — a data race that could corrupt the
    //statistics; they are now included in the reductions
    #pragma omp parallel for num_threads(num_thread) reduction(min:temp_min) reduction(max:temp_max) reduction(+:sum) reduction(+:avg_r_sum) reduction(+:count) reduction(+:count_synch) reduction(+:count_comp) reduction(+:count_i) reduction(+:avg_r_sum_synch) reduction(+:avg_r_sum_comp) reduction(+:avg_r_sum_inject)
    for (i=0;i<ph_num;i++)
    {
        #if CYCLOSYNCHROTRON_SWITCH == ON
        if (((ph+i)->weight != 0)) //dont want account for null or absorbed UNABSORBED_CS_PHOTON photons
        #endif
        {
            sum+=((ph+i)->num_scatt);
            avg_r_sum+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
            if (((ph+i)->num_scatt) > temp_max )
            {
                temp_max=((ph+i)->num_scatt);
            }
            if (((ph+i)->num_scatt)<temp_min)
            {
                temp_min=((ph+i)->num_scatt);
            }
            if (((ph+i)->type) == INJECTED_PHOTON )
            {
                avg_r_sum_inject+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
                count_i++;
            }
            if ((((ph+i)->type) == COMPTONIZED_PHOTON) || (((ph+i)->type) == UNABSORBED_CS_PHOTON))
            {
                avg_r_sum_comp+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
                count_comp++;
            }
            count++;
        }
        if (((ph+i)->type) == CS_POOL_PHOTON )
        {
            avg_r_sum_synch+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
            count_synch++;
        }
    }
    //NOTE(review): a type with zero members prints nan here (0/0); harmless in the log
    fprintf(fPtr, "In this frame Avg r for i type: %e c and o type: %e and s type: %e\n", avg_r_sum_inject/count_i, avg_r_sum_comp/count_comp, avg_r_sum_synch/count_synch);
    fflush(fPtr);
    *avg=sum/count;
    *r_avg=avg_r_sum/count;
    *max=temp_max;
    *min=temp_min;
}
/* Overwrite the hydro grid with an analytic cylindrical-outflow test model:
 * uniform Lorentz factor, comoving temperature and comoving density at every
 * grid element, radiation-dominated pressure, and a velocity field oriented
 * parallel to the jet axis (components depend on GEOMETRY/DIMENSIONS). */
void cylindricalPrep(struct hydro_dataframe *hydro_data)
{
double gamma_infinity=100, t_comov=1e5, ddensity=3e-7;// the comoving temperature in Kelvin, and the comoving density in g/cm^2
int i=0;
/* v from Lorentz factor: v = sqrt(1 - 1/gamma^2); lab density = gamma * comoving density */
double vel=sqrt(1-pow(gamma_infinity, -2.0)), lab_dens=gamma_infinity*ddensity;
for (i=0; i<hydro_data->num_elements; i++)
{
((hydro_data->gamma))[i]=gamma_infinity;
((hydro_data->dens))[i]=ddensity;
((hydro_data->dens_lab))[i]=lab_dens;
/* radiation-dominated equation of state: P = a_rad * T^4 / 3 */
((hydro_data->pres))[i]=(A_RAD*pow(t_comov, 4.0))/(3);
((hydro_data->temp))[i]=t_comov; //just assign t_comov
#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
#if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
((hydro_data->v0))[i]=0;
((hydro_data->v1))[i]=vel; //geometry dependent want this to be parallel to jet axis
#endif
#if GEOMETRY == SPHERICAL
/* project the axis-parallel velocity onto spherical basis vectors;
   r1 here is the polar angle of the element */
((hydro_data->v0))[i]=vel*cos(((hydro_data->r1))[i]);//rhat
((hydro_data->v1))[i]=-vel*sin(((hydro_data->r1))[i]);//theta hat direction
#endif
#if DIMENSIONS == TWO_POINT_FIVE
//have to make sure that the 3rd vector direction is set to 0 in 2.5D case
((hydro_data->v2))[i]=0;
#endif
#else
#if GEOMETRY == CARTESIAN
/* in 3D Cartesian the jet axis is taken to be the z (v2) direction */
((hydro_data->v0))[i]=0;
((hydro_data->v1))[i]=0;
((hydro_data->v2))[i]=vel;
#endif
#if GEOMETRY == SPHERICAL
((hydro_data->v0))[i]=vel*cos(((hydro_data->r1))[i]);//rhat
((hydro_data->v1))[i]=-vel*sin(((hydro_data->r1))[i]);//theta hat direction
((hydro_data->v2))[i]=0;
#endif
#if GEOMETRY == POLAR
((hydro_data->v0))[i]=0;
((hydro_data->v1))[i]=0;
((hydro_data->v2))[i]=vel;
#endif
#endif
}
}
/* Overwrite the hydro grid with an analytic spherical fireball-wind model.
 * Inside the saturation radius (r < r00*gamma_infinity) the flow accelerates
 * linearly (gamma = r/r00); outside it coasts at gamma_infinity. Pressure,
 * density and temperature follow the standard fireball scalings for a wind of
 * luminosity `lumi`. Velocity is purely radial, expressed in the components
 * appropriate for the compiled GEOMETRY/DIMENSIONS. */
void sphericalPrep(struct hydro_dataframe *hydro_data, FILE *fPtr)
{
double gamma_infinity=100, lumi=1e52, r00=1e8; //shopuld be 10^57
//double gamma_infinity=5, lumi=1e52, r00=1e8; //shopuld be 10^57
double vel=0, r=0;
int i=0;
for (i=0; i<hydro_data->num_elements; i++)
{
if (((hydro_data->r))[i] >= (r00*gamma_infinity))
{
/* coasting phase: constant gamma, pressure falls as r^(-8/3) */
((hydro_data->gamma))[i]=gamma_infinity;
((hydro_data->pres))[i]=(lumi*pow(r00, 2.0/3.0)*pow(((hydro_data->r))[i], -8.0/3.0) )/(12.0*M_PI*C_LIGHT*pow(gamma_infinity, 4.0/3.0));
}
else
{
/* acceleration phase: gamma grows linearly with r, pressure falls as r^-4 */
((hydro_data->gamma))[i]=((hydro_data->r))[i]/r00;
((hydro_data->pres))[i]=(lumi*pow(r00, 2.0))/(12.0*M_PI*C_LIGHT*pow(((hydro_data->r))[i], 4.0) );
}
((hydro_data->dens))[i]=lumi/(4*M_PI*pow(((hydro_data->r))[i], 2.0)*pow(C_LIGHT, 3.0)*gamma_infinity*(((hydro_data->gamma))[i]));
((hydro_data->dens_lab))[i]=(((hydro_data->dens))[i])*(((hydro_data->gamma))[i]);
/* invert the radiation EOS: T = (3P/a_rad)^(1/4) */
((hydro_data->temp))[i]=pow(3*(((hydro_data->pres))[i])/(A_RAD) ,1.0/4.0);
vel=sqrt(1-(pow(((hydro_data->gamma))[i], -2.0)));
#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
#if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
/* radial unit vector in the (r0, r1) plane scales the speed */
r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2));
((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
#endif
#if GEOMETRY == SPHERICAL
((hydro_data->v0))[i]=vel;//rhat
((hydro_data->v1))[i]=0;//theta hat direction
#endif
#if DIMENSIONS == TWO_POINT_FIVE
//have to make sure that the 3rd vector direction is set to 0 in 2.5D case
((hydro_data->v2))[i]=0;
#endif
#else
#if GEOMETRY == CARTESIAN
r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2)+pow(((hydro_data->r2))[i], 2));
((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#if GEOMETRY == SPHERICAL
((hydro_data->v0))[i]=vel;//rhat
((hydro_data->v1))[i]=0;//theta hat direction
((hydro_data->v2))[i]=0;
#endif
#if GEOMETRY == POLAR
r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r2))[i], 2));
((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r; //need to figure this out
((hydro_data->v1))[i]=0;
((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#endif
//fprintf(fPtr,"Gamma: %lf\nR: %lf\nPres: %e\nvel %lf\nX: %lf\nY %lf\nVx: %lf\nVy: %lf\nDens: %e\nLab_Dens: %e\nTemp: %lf\n", *(gamma+i), *(r+i), *(pres+i), vel, *(x+i), *(y+i), *(vx+i), *(vy+i), *(dens+i), *(dens_lab+i), *(temp+i));
}
}
/* Overwrite the hydro grid with the structured (angle-dependent) fireball
 * model of Lundman, Peer & Ryde (2014), used to compare MCRaT polarization
 * against their published results. The asymptotic Lorentz factor eta falls
 * off with angle as gamma_0/sqrt(1+(theta/theta_j)^(2p)), clamped to 2
 * outside the shear layer. */
void structuredFireballPrep(struct hydro_dataframe *hydro_data, FILE *fPtr)
{
//This model is provided by Lundman, Peer, Ryde 2014, use this to compare our MCRaT polarization to their polarizations
double gamma_0=100, lumi=1e52, r00=1e8, theta_j=1e-2, p=4; //theta_j in paper is 1e-2, 3e-2, 1e-1 and p is 1,2,4
/* base temperature at r00 from L = 4*pi*r00^2 * a_rad * c * T_0^4 */
double T_0=pow(lumi/(4*M_PI*r00*r00*A_RAD*C_LIGHT), 1.0/4.0);
double eta=0, r_sat=0, r;
double vel=0, theta_ratio=0;
int i=0;
for (i=0; i<hydro_data->num_elements; i++)
{
theta_ratio=((hydro_data->theta)[i])/theta_j;
eta=gamma_0/sqrt(1+pow(theta_ratio, 2*p));
if ((hydro_data->theta)[i] >= theta_j*pow(gamma_0/2, 1.0/p))
{
//*(gamma+i)=2; //outside with of shear layer have gamma be 2 like in paper
eta=2.0;
}
r_sat=eta*r00;
if (((hydro_data->r)[i]) >= r_sat)
{
/* coasting: gamma saturated at eta, temperature drops as r^(-2/3) */
(hydro_data->gamma)[i]=eta;
(hydro_data->temp)[i]=T_0*pow(r_sat/((hydro_data->r)[i]), 2.0/3.0)/eta;
}
else
{
(hydro_data->gamma)[i]=((hydro_data->r)[i])/r_sat; //not sure if this is right but it shouldn't matter since we're injecting our photons far from r00
(hydro_data->temp)[i]=T_0;
}
vel=sqrt(1-(pow((hydro_data->gamma)[i], -2.0)));
/* NOTE(review): M_P appears in both numerator and denominator and cancels
   algebraically; kept as written to preserve the exact original expression */
(hydro_data->dens)[i] = M_P*lumi/(4*M_PI*M_P*C_LIGHT*C_LIGHT*C_LIGHT*eta*vel*((hydro_data->gamma)[i])*((hydro_data->r)[i])*((hydro_data->r)[i])); //equation paper has extra c, but then units dont work out
(hydro_data->dens_lab)[i]=((hydro_data->dens)[i])*((hydro_data->gamma)[i]);
/* radiation-dominated equation of state: P = a_rad * T^4 / 3 */
(hydro_data->pres)[i]=(A_RAD*pow((hydro_data->temp)[i], 4.0))/(3);
#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
#if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
/* radial unit vector in the (r0, r1) plane scales the speed */
r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2));
((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
#endif
#if GEOMETRY == SPHERICAL
((hydro_data->v0))[i]=vel;//rhat
((hydro_data->v1))[i]=0;//theta hat direction
#endif
#if DIMENSIONS == TWO_POINT_FIVE
//have to make sure that the 3rd vector direction is set to 0 in 2.5D case
((hydro_data->v2))[i]=0;
#endif
#else
#if GEOMETRY == CARTESIAN
r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2)+pow(((hydro_data->r2))[i], 2));
((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#if GEOMETRY == SPHERICAL
((hydro_data->v0))[i]=vel;//rhat
((hydro_data->v1))[i]=0;//theta hat direction
((hydro_data->v2))[i]=0;
#endif
#if GEOMETRY == POLAR
r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r2))[i], 2));
((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
((hydro_data->v1))[i]=0;
((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#endif
//fprintf(fPtr,"eta: %lf\nr_sat: %lf\nGamma: %lf\nR: %lf\nTheta: %lf\nPres: %e\nvel %lf\nX: %lf\nY %lf\nVx: %lf\nVy: %lf\nDens: %e\nLab_Dens: %e\nTemp: %lf\n\n", eta, r_sat, *(gamma+i), *(r+i), (*(theta+i)), *(pres+i), vel, *(x+i), *(y+i), *(vx+i), *(vy+i), *(dens+i), *(dens_lab+i), *(temp+i));
}
}
/* Find the minimum and maximum radius and polar angle over all active
 * (weight != 0) photons.
 *
 * ph                  : array of photons
 * ph_num              : number of photons in the array
 * min/max             : out — smallest/largest photon radius
 * min_theta/max_theta : out — smallest/largest photon polar angle (radians)
 * fPtr                : log file (only used by the commented-out debug prints)
 */
void phMinMax(struct photon *ph, int ph_num, double *min, double *max, double *min_theta, double *max_theta, FILE *fPtr)
{
    double temp_r_max=0, temp_r_min=DBL_MAX, temp_theta_max=0, temp_theta_min=DBL_MAX;
    int i=0;
#if defined(_OPENMP)
    /* BUG FIX: omp_get_num_threads() returns 1 outside a parallel region, so
       the loop below was effectively serial; omp_get_max_threads() reports
       the team size a parallel region would actually get. */
    int num_thread=omp_get_max_threads();
#endif
    double ph_r=0, ph_theta=0;
    /* ph_r/ph_theta are written before every read, so private (rather than
       firstprivate) is the correct and clearer sharing attribute */
    #pragma omp parallel for num_threads(num_thread) private(ph_r, ph_theta) reduction(min:temp_r_min) reduction(max:temp_r_max) reduction(min:temp_theta_min) reduction(max:temp_theta_max)
    for (i=0;i<ph_num;i++)
    {
        if ((ph+i)->weight != 0) //skip null/absorbed photons
        {
            ph_r=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
            ph_theta=acos(((ph+i)->r2) /ph_r); //this is the photons theta position in the FLASH grid, given in radians
            if (ph_r > temp_r_max )
            {
                temp_r_max=ph_r;
            }
            if (ph_r<temp_r_min)
            {
                temp_r_min=ph_r;
            }
            if (ph_theta > temp_theta_max )
            {
                temp_theta_max=ph_theta;
            }
            if (ph_theta<temp_theta_min)
            {
                temp_theta_min=ph_theta;
            }
        }
    }
    *max=temp_r_max;
    *min=temp_r_min;
    *max_theta=temp_theta_max;
    *min_theta=temp_theta_min;
}
|
model_initializer.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_MODEL_INITIALIZER_H_
#define CORE_MODEL_INITIALIZER_H_
#include <Math/DistFunc.h>
#include <omp.h>
#include <ctime>
#include <string>
#include <vector>
#include "core/container/math_array.h"
#include "core/diffusion/diffusion_grid.h"
#include "core/resource_manager.h"
#include "core/simulation.h"
#include "core/util/random.h"
class EulerGrid;
class RungeKuttaGrid;
namespace bdm {
/// Collection of static helpers to populate a BioDynaMo simulation with
/// agents (on grids, surfaces, spheres, or random positions) and to define
/// and initialize diffusible substances. All Create*/Grid3D functions add
/// the constructed agents to the active simulation's ExecutionContext, and
/// agent creation is parallelized with OpenMP.
struct ModelInitializer {
  /// Creates a 3D cubic grid of agents and adds them to the
  /// ExecutionContext. Type of the agent is determined by the return
  /// type of parameter agent_builder.
  ///
  ///     ModelInitializer::Grid3D(8, 10, [](const Double3& pos){
  ///     return Cell(pos); });
  /// @param agents_per_dim number of agents on each axis.
  ///                       Number of generated agents =
  ///                       `agents_per_dim ^ 3`
  /// @param space          space between the positions - e.g space = 10:
  ///                       positions = `{(0, 0, 0), (0, 0, 10), (0, 0,
  ///                       20), ... }`
  /// @param agent_builder  function containing the logic to instantiate a
  ///                       new agent. Takes `const
  ///                       Double3&` as input parameter
  ///
  template <typename Function>
  static void Grid3D(size_t agents_per_dim, double space,
                     Function agent_builder) {
#pragma omp parallel
    {
      // the execution context is thread-local, so fetch it inside the region
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
#pragma omp for
      for (size_t x = 0; x < agents_per_dim; x++) {
        auto x_pos = x * space;
        for (size_t y = 0; y < agents_per_dim; y++) {
          auto y_pos = y * space;
          for (size_t z = 0; z < agents_per_dim; z++) {
            auto* new_agent = agent_builder({x_pos, y_pos, z * space});
            ctxt->AddAgent(new_agent);
          }
        }
      }
    }
  }

  /// Creates a 3D grid of agents and adds them to the
  /// ExecutionContext. Type of the agent is determined by the return
  /// type of parameter agent_builder.
  ///
  ///     ModelInitializer::Grid3D({8,6,4}, 10, [](const Double3&
  ///     pos){ return Cell(pos); });
  /// @param agents_per_dim number of agents on each axis.
  ///                       Number of generated agents =
  ///                       `agents_per_dim[0] * agents_per_dim[1] *
  ///                       agents_per_dim[2]`
  /// @param space          space between the positions - e.g space = 10:
  ///                       positions = `{(0, 0, 0), (0, 0, 10), (0, 0,
  ///                       20), ... }`
  /// @param agent_builder  function containing the logic to instantiate a
  ///                       new agent. Takes `const
  ///                       Double3&` as input parameter
  ///
  template <typename Function>
  static void Grid3D(const std::array<size_t, 3>& agents_per_dim, double space,
                     Function agent_builder) {
#pragma omp parallel
    {
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
#pragma omp for
      for (size_t x = 0; x < agents_per_dim[0]; x++) {
        auto x_pos = x * space;
        for (size_t y = 0; y < agents_per_dim[1]; y++) {
          auto y_pos = y * space;
          for (size_t z = 0; z < agents_per_dim[2]; z++) {
            auto* new_agent = agent_builder({x_pos, y_pos, z * space});
            ctxt->AddAgent(new_agent);
          }
        }
      }
    }
  }

  /// Creates agents on the given positions and adds them to the
  /// ExecutionContext.
  ///
  /// @param positions     positions of the agents to be
  /// @param agent_builder function containing the logic to instantiate a
  ///                      new agent. Takes `const
  ///                      Double3&` as input parameter
  ///
  template <typename Function>
  static void CreateAgents(const std::vector<Double3>& positions,
                           Function agent_builder) {
#pragma omp parallel
    {
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
#pragma omp for
      for (size_t i = 0; i < positions.size(); i++) {
        auto* new_agent =
            agent_builder({positions[i][0], positions[i][1], positions[i][2]});
        ctxt->AddAgent(new_agent);
      }
    }
  }

  /// Creates agents with random positions and adds them to the
  /// ExecutionContext. Agent creation is parallelized.
  ///
  /// @param[in] min           The minimum position value
  /// @param[in] max           The maximum position value
  /// @param[in] num_agents    The number agents
  /// @param[in] agent_builder function containing the logic to instantiate a
  ///                          new agent. Takes `const
  ///                          Double3&` as input parameter
  /// \param[in] rng           Uses the given DistributionRng.
  ///                          if rng is a nullptr, this function uses a
  ///                          uniform distribution between [min, max[
  template <typename Function>
  static void CreateAgentsRandom(double min, double max, uint64_t num_agents,
                                 Function agent_builder,
                                 DistributionRng<double>* rng = nullptr) {
#pragma omp parallel
    {
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
      auto* random = sim->GetRandom();
#pragma omp for
      for (uint64_t i = 0; i < num_agents; i++) {
        if (rng != nullptr) {
          Double3 pos;
          bool in_range = false;
          // rejection-sample until the drawn position lies in [min, max]^3
          // NOTE(review): never terminates if the distribution has no mass
          // inside the cube — callers must supply a compatible rng
          do {
            pos = rng->Sample3();
            in_range = (pos[0] >= min) && (pos[0] <= max) && (pos[1] >= min) &&
                       (pos[1] <= max) && (pos[2] >= min) && (pos[2] <= max);
          } while (!in_range);
          auto* new_agent = agent_builder(pos);
          ctxt->AddAgent(new_agent);
        } else {
          auto* new_agent = agent_builder(random->UniformArray<3>(min, max));
          ctxt->AddAgent(new_agent);
        }
      }
    }
  }

  /// Creates agents on surface and adds them to the ExecutionContext.
  /// The x and y positions are defined by xmin, xmax, deltax and ymin, ymax,
  /// deltay. The z position is calculated using `f`. Agent creation is
  /// parallelized.
  ///
  ///     auto construct = [](const Double3& position) {
  ///       Cell* cell = new Cell(position);
  ///       cell->SetDiameter(10);
  ///       return cell;
  ///     };
  ///     auto f = [](const double* x, const double* params) {
  ///       // 10 * sin(x/20) + 10 * sin(y/20)
  ///       return 10 * std::sin(x[0] / 20.) + 10 * std::sin(x[1] / 20.0);
  ///     };
  ///     ModelInitializer::CreateAgentsOnSurface(f, {}, -100, 100, 10, -100,
  ///     100, 10, construct);
  ///
  /// \param[in] f             function that defines the surface
  /// \param[in] fn_params     Parameters that will be passed to `f` as
  ///                          second argument.
  /// @param[in] xmin          Minimum x coordinate on which a agent will be
  ///                          created.
  /// @param[in] xmax          Maximum x coordinate on which a agent will be
  ///                          created.
  /// @param[in] deltax        Space between two agents on the x-axis.
  /// @param[in] ymin          Minimum y coordinate on which a agent will be
  ///                          created.
  /// @param[in] ymax          Maximum y coordinate on which a agent will be
  ///                          created.
  /// @param[in] deltay        Space between two agents on the y-axis.
  /// @param[in] agent_builder function containing the logic to instantiate a
  ///                          new agent. Takes `const Double3&` as input
  ///                          parameter
  template <typename Function>
  static void CreateAgentsOnSurface(
      double (*f)(const double*, const double*),
      const FixedSizeVector<double, 10>& fn_params, double xmin, double xmax,
      double deltax, double ymin, double ymax, double deltay,
      Function agent_builder) {
#pragma omp parallel
    {
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
      // iteration counts are cheap to compute, so each thread derives its own
      auto xiterations =
          static_cast<uint64_t>(std::floor((xmax - xmin) / deltax));
      auto yiterations =
          static_cast<uint64_t>(std::floor((ymax - ymin) / deltay));
#pragma omp for
      for (uint64_t xit = 0; xit < xiterations; ++xit) {
        double x = xmin + xit * deltax;
        for (uint64_t yit = 0; yit < yiterations; ++yit) {
          double y = ymin + yit * deltay;
          Double3 pos = {x, y};
          // z is given by the surface function
          pos[2] = f(pos.data(), fn_params.data());
          ctxt->AddAgent(agent_builder(pos));
        }
      }
    }
  }

  /// Creates agents on surface and adds them to the ExecutionContext.
  /// The x and y positions are determined by a uniform distribution [xmin,
  /// xmax[ and [ymin, ymax[. The z position is calculated using `f`. Agent
  /// creation is parallelized.
  ///
  ///     auto construct = [](const Double3& position) {
  ///       Cell* cell = new Cell(position);
  ///       cell->SetDiameter(10);
  ///       return cell;
  ///     };
  ///     auto f = [](const double* x, const double* params) {
  ///       // 10 * sin(x/20) + 10 * sin(y/20)
  ///       return 10 * std::sin(x[0] / 20.) + 10 * std::sin(x[1] / 20.0);
  ///     };
  ///     ModelInitializer::CreateAgentsOnSurfaceRndm(f, {}, -100, 100, -100,
  ///     100, construct);
  ///
  /// \param[in] f             function that defines the surface
  /// \param[in] fn_params     Parameters that will be passed to `f` as
  ///                          second argument.
  /// @param[in] xmin          Minimum x coordinate on which a agent will be
  ///                          created.
  /// @param[in] xmax          Maximum x coordinate on which a agent will be
  ///                          created.
  /// @param[in] ymin          Minimum y coordinate on which a agent will be
  ///                          created.
  /// @param[in] ymax          Maximum y coordinate on which a agent will be
  ///                          created.
  /// @param[in] agent_builder function containing the logic to instantiate a
  ///                          new agent. Takes `const Double3&` as input
  ///                          parameter
  template <typename Function>
  static void CreateAgentsOnSurfaceRndm(
      double (*f)(const double*, const double*),
      const FixedSizeVector<double, 10>& fn_params, double xmin, double xmax,
      double ymin, double ymax, uint64_t num_agents, Function agent_builder) {
#pragma omp parallel
    {
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
      auto* random = sim->GetRandom();
#pragma omp for
      for (uint64_t i = 0; i < num_agents; ++i) {
        Double3 pos = {random->Uniform(xmin, xmax),
                       random->Uniform(ymin, ymax)};
        pos[2] = f(pos.data(), fn_params.data());
        ctxt->AddAgent(agent_builder(pos));
      }
    }
  }

  /// Creates agents with random positions on a sphere and adds them to the
  /// ExecutionContext. The agents' positions are uniformly distributed accross
  /// the surface. Agent creation is parallelized.
  /// Algorithm: Knop, 1970, 10.1145/362349.362377 (doi).
  ///
  /// \param[in] center        Center of the sphere
  /// \param[in] radius        Radius of the sphere
  /// @param[in] num_agents    The number of agents
  /// @param[in] agent_builder function containing the logic to instantiate a
  ///                          new agent. Takes `const
  ///                          Double3&` as input parameter
  template <typename Function>
  static void CreateAgentsOnSphereRndm(const Double3& center, double radius,
                                       uint64_t num_agents,
                                       Function agent_builder) {
#pragma omp parallel
    {
      auto* sim = Simulation::GetActive();
      auto* ctxt = sim->GetExecutionContext();
      auto* random = sim->GetRandom();
#pragma omp for
      for (uint64_t i = 0; i < num_agents; i++) {
        auto pos = random->Sphere(radius) + center;
        auto* new_agent = agent_builder(pos);
        ctxt->AddAgent(new_agent);
      }
    }
  }

  /// Creates agents with random positions in a sphere and adds them to the
  /// ExecutionContext. Agents are distributed uniformly inside the sphere.
  /// Agent creation is parallelized. Algorithm: Knop,
  /// 1970, 10.1145/362349.362377 (doi).
  ///
  /// \param[in] center        Center of the sphere
  /// \param[in] radius        Radius of the sphere
  /// @param[in] num_agents    The number of agents
  /// @param[in] agent_builder function containing the logic to instantiate a
  ///                          new agent. Takes `const
  ///                          Double3&` as input parameter
  template <typename Function>
  static void CreateAgentsInSphereRndm(const Double3& center, double radius,
                                       uint64_t num_agents,
                                       Function agent_builder) {
    // We use a probability density function (PDF) to model the probability of
    // an agent to occur at a distance `r>=0` of the center. As the surface of
    // a sphere scales as `r^2`, the PDF does as well. Thus
    // `p(r)=a*r^2*\Theta(R-r)`, where `\Theta` is a heavyside function and R is
    // largest allowed radius (interpretation: no agents outside the sphere). We
    // can fix `a` by requiring `\int_0^\inf p(r') dr' = 1` and obtain
    // `a=3/R^3`.
    auto radial_pdf_sphere = [](const double* x, const double* params) {
      double R{params[0]};
      double r{x[0]};
      if (r > 0.0 && r <= R) {
        return 3.0 * std::pow(r, 2.0) / std::pow(R, 3.0);
      } else {
        return 0.0;
      }
    };
    // Get a random number generator to sample from our PDF.
    auto* random = Simulation::GetActive()->GetRandom();
    auto rng =
        random->GetUserDefinedDistRng1D(radial_pdf_sphere, {radius}, 0, radius);
    // Create a random radius for each of the agents. Note: this is done
    // serially because we GetUserDefinedDistRng1D does not work in parallel
    // regions at the moment.
    std::vector<double> random_radius;
    random_radius.resize(num_agents);
    for (size_t i = 0; i < num_agents; i++) {
      random_radius[i] = rng.Sample();
    }
#pragma omp parallel shared(random_radius)
    {
      auto* ctxt_tl = Simulation::GetActive()->GetExecutionContext();
#pragma omp for schedule(static)
      for (uint64_t i = 0; i < num_agents; i++) {
        // draw a uniformly distributed direction; the pre-sampled radius
        // gives the correct volumetric distribution
        auto pos = random->Sphere(random_radius[i]) + center;
        auto* new_agent = agent_builder(pos);
        ctxt_tl->AddAgent(new_agent);
      }
    }
  }

  /// Allows agents to secrete the specified substance. Diffusion throughout the
  /// simulation space is automatically taken care of by the DiffusionGrid class
  ///
  /// @param[in] substance_id    The substance identifier
  /// @param[in] substance_name  The substance name
  /// @param[in] diffusion_coeff The diffusion coefficient
  /// @param[in] decay_constant  The decay constant
  /// @param[in] resolution      The resolution of the diffusion grid
  ///
  static void DefineSubstance(size_t substance_id,
                              const std::string& substance_name,
                              double diffusion_coeff, double decay_constant,
                              int resolution = 10);

  /// Registers `function` as the initial-value initializer of the diffusion
  /// grid that belongs to `substance_id`.
  template <typename F>
  static void InitializeSubstance(size_t substance_id, F function) {
    auto* sim = Simulation::GetActive();
    auto* rm = sim->GetResourceManager();
    auto diffusion_grid = rm->GetDiffusionGrid(substance_id);
    diffusion_grid->AddInitializer(function);
  }
};
} // namespace bdm
#endif // CORE_MODEL_INITIALIZER_H_
|
clt_thread.c | /*------------------------------------------------------------------------------
| SOURCE: clt_thread.c
|
| AUTHOR: Alex Zielinski
|
| DESC: Module that represents the client program. The program takes
| 3 additional cmd arguments:
| - host IP
| - host port
| - number of clients/threads to create
|
| Usage: ./clt <HOST IP> <PORT> <NUM OF CLIENTS>
|
| The user must specify the number of clients the program will
| create. The program will then create a seperate thread for each
| client in order to simulate multiple client connections to the
| server.
------------------------------------------------------------------------------*/
#include "../include/clt_thread.h"
#include "../include/socket.h"
#include "../include/log.h"
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>
#include <pthread.h>
/*==============================================================================
| FUNCTION: int main(int argc, char **argv)
| argc : number of cmd args
| **argv : array of args
|
| RETURN: 0 on success
|
| DATE: Feb 13, 2018
|
| AUTHOR: Alex Zielinski
|
| DESC: Main entry point of the program.
==============================================================================*/
int main(int argc, char **argv)
{
    /* Validate args, write the log header, then spawn one client per thread.
       Each thread resolves the host itself in spawn_clients(), so no shared
       network state is needed here (the old nw_var was filled and never used). */
    if(!valid_args(argc, argv[ARG_PORT], argv[ARG_CLTS])) // check for valid args
        exit(1);
    if(app_clt_hdr() == -1) // append header to client log file
        exit(1);
    int num_of_clts = atoi(argv[ARG_CLTS]); // get number of clients to create
    omp_set_num_threads(num_of_clts);
    #pragma omp parallel
    {
        spawn_clients(argv[ARG_IP], argv[ARG_PORT]);
    }
    return 0;
}
/*------------------------------------------------------------------------------
| FUNCTION: int valid_args(int arg, char *port, char *clients)
| arg : number of cmd args
| *port : port arg
| *clients : number of clients arg
|
| RETURN: 1 on true, 0 on false
|
| DATE: Feb 13, 2018
|
| AUTHOR: Alex Zielinski
|
| DESC: Checks for valid arguments (valid number of args, port and
| number of clients). Returns true (1) if args are valid,
| otherwise returns false (0).
------------------------------------------------------------------------------*/
/* Validate the command-line arguments: exact argument count, all-digit port,
   and all-digit client count. Returns 1 (true) if valid, 0 (false) otherwise,
   printing a usage/error message on failure. */
int valid_args(int arg, char *port, char *clients)
{
    // check valid number of args (4)
    if(arg != ARGSNUM)
    {
        printf("\nUsage: ./clt <HOST IP> <PORT> <NUM OF CLIENTS>\n\n");
        return 0;
    }
    // check for valid port
    for(int i = 0; port[i] != '\0'; i++)
    {
        if(!isdigit((unsigned char)port[i]))
        {
            printf("\nError: Invalid port: %s.\n\n", port);
            return 0;
        }
    }
    // check for valid num of clients
    for(int i = 0; clients[i] != '\0'; i++)
    {
        if(!isdigit((unsigned char)clients[i]))
        {
            /* BUG FIX: the message previously printed `port` here */
            printf("\nError: Invalid number of clients: %s.\n\n", clients);
            return 0;
        }
    }
    return 1;
}
/*------------------------------------------------------------------------------
| FUNCTION: int connect_to_host(struct clt_nw_var *nw)
| *nw : pointer to clients network variables
|
| RETURN: 0 on success, -1 on failure
|
| DATE: Feb 13, 2018
|
| AUTHOR: Alex Zielinski
|
| DESC: High level function that connects the client program to a host
| machine. Uses variables held in '*nw' to create a socket and
| connect the socket to a host machine.
------------------------------------------------------------------------------*/
int connect_to_host(struct clt_nw_var *nw)
{
    /* create a TCP (stream) socket and store its descriptor in nw->sd */
    if(create_socket(&(nw->sd), AF_INET, SOCK_STREAM, 0) == -1)
        return -1;
    /* zero the address struct, then fill it with the host IP/port already
       stored (in network byte order) in nw by get_host_info() */
    bzero((char *)&(nw->h_addr), sizeof(struct sockaddr_in));
    fill_addr(&(nw->h_addr), AF_INET, nw->h_port, nw->h_ip);
    if(connect_socket(nw->sd, (struct sockaddr *)&(nw->h_addr), sizeof(nw->h_addr)) == -1)
        return -1;
    return 0;
}
/*------------------------------------------------------------------------------
| FUNCTION: int send_loop(struct clt_nw_var nw)
| nw : clients network variables
|
| RETURN: 0 on success, -1 on failure
|
| DATE: Feb 13, 2018
|
| AUTHOR: Alex Zielinski
|
| DESC: Function to initiate send loop. Clients keeps sending a packet
| of PKTSIZE and reading the echo from the server until TIMEOUT
| has occured.
------------------------------------------------------------------------------*/
/* Echo-request loop for one client. Repeatedly sends a PKTSIZE packet and
 * reads the server's echo, timing each round trip, until TIMEOUT seconds
 * have elapsed or the server shuts down. On exit the per-client statistics
 * (connection time, request count, bytes, average round-trip in ms) are
 * appended to the log. Returns 0 on success, -1 on a socket error. */
int send_loop(struct clt_nw_var nw)
{
    struct timeval _tt1;
    struct timeval _tt2;
    struct clt_log_stats _stats;
    char _send_buff[PKTSIZE];
    char _recv_buff[PKTSIZE];
    double _elapsed_time;
    double _avg_time = 0;
    int _bytes_recv;
    int _bytes_sent;
    int _total_sent;
    time_t _t = time(NULL);
    time_t _t1;
    time_t _t2;
    memset(_send_buff, 'A', PKTSIZE);
    time(&_t1); // get current time (for timeout)
    _stats.tm = *localtime(&_t); // time of new connection
    _stats.requests = 0;
    init_bytes_struct(&(_stats.bytes));
    // send loop (until timeout)
    while(1)
    {
        gettimeofday(&_tt1, NULL); // start round-trip timer
        /* BUG FIX: send() may write fewer than PKTSIZE bytes on a stream
           socket, so loop until the whole packet has been sent */
        _total_sent = 0;
        while(_total_sent < PKTSIZE)
        {
            if ((_bytes_sent = send(nw.sd, _send_buff + _total_sent, PKTSIZE - _total_sent, 0)) == -1)
            {
                printf("\tError sending\n");
                printf("\tError code: %s\n\n", strerror(errno));
                return -1;
            }
            _total_sent += _bytes_sent;
        }
        _stats.requests++; // update client requests
        // read echo; MSG_WAITALL blocks until the full packet (or EOF) arrives
        if((_bytes_recv = recv(nw.sd, _recv_buff, PKTSIZE, MSG_WAITALL)) == -1)
        {
            printf("\tClient %d error reading\n", omp_get_thread_num());
            printf("\tError code: %s\n\n", strerror(errno));
            return -1;
        }
        else if(_bytes_recv == 0) // server shutdown
        {
            printf("\nServer shutdown\n\n");
            break;
        }
        else // success
        {
            update_bytes_struct(&_stats.bytes, _bytes_recv);
            bzero(_recv_buff, sizeof(_recv_buff));
        }
        gettimeofday(&_tt2, NULL); // stop round-trip timer
        _elapsed_time = (_tt2.tv_sec - _tt1.tv_sec) * 1000.0;
        _elapsed_time += (_tt2.tv_usec - _tt1.tv_usec) / 1000.0; // in milliseconds
        _avg_time += _elapsed_time;
        // check for timeout
        time(&_t2);
        if(difftime(_t2, _t1) > TIMEOUT)
            break;
    }
    /* requests >= 1 whenever the loop exits via break, so this is safe */
    _avg_time = _avg_time / _stats.requests;
    printf("- Client %d: Disconnecting\n", omp_get_thread_num());
    close(nw.sd);
    append_clt_data(_stats, _avg_time);
    return 0;
}
/*------------------------------------------------------------------------------
| FUNCTION: void spawn_clients(char *ip, char *port)
| *ip : cmd arg that holds the servers IP
| *port : cmd arg that holds teh servers listening port
|
| RETURN: void
|
| DATE: Feb 13, 2018
|
| AUTHOR: Alex Zielinski
|
| DESC: high level function that is called by openmp. connects to a host
| specified by '*ip' and '*port'. Once connected it calls the
| send_loop function in order to initiate data transfer.
------------------------------------------------------------------------------*/
void spawn_clients(char *ip, char *port)
{
struct clt_nw_var _nw;
get_host_info(&_nw, ip, port);
if(connect_to_host(&_nw) == -1)
return;
if(send_loop(_nw) == -1)
return;
}
/*------------------------------------------------------------------------------
| FUNCTION: void get_host_info(struct clt_nw_var *nw, char *ip, char *port)
| *nw : pointer to clients network variables
| *ip : cmd arg that hold the servers IP
| *port : cmd arg that holds the servers listening port
|
| RETURN: void
|
| DATE: Feb 13, 2018
|
| AUTHOR: Alex Zielinski
|
| DESC: Copies the cmd args holding the servers ip and listening port
| ('*ip' and '*port' respectivly) into the appropriate fields
| of the clients network data struct pointed to by '*nw'.
------------------------------------------------------------------------------*/
void get_host_info(struct clt_nw_var *nw, char *ip, char *port)
{
    /* Parse the server's dotted-quad IP and decimal port strings and store
       them in network byte order in the client's network struct. */
    nw->h_port = htons(atoi(port));
    nw->h_ip = inet_addr(ip);
}
/*------------------------------------------------------------------------------
| FUNCTION: void print_nw_struct(struct clt_nw_var nw)
| nw : client network variables struct to print
|
| RETURN: void
|
| DATE: Feb 13, 2018
|
| AUTHOR: Alex Zielinski
|
| DESC: Funcion used for testing. Simply prints the contents of 'nw'
------------------------------------------------------------------------------*/
void print_nw_struct(struct clt_nw_var nw)
{
    /* NOTE(review): "%lu" assumes h_ip is an unsigned long; if h_ip is an
       in_addr_t (uint32_t) this is a format mismatch on LP64 — confirm the
       field's declared type in clt_thread.h */
    printf("\nsock: %d\nport: %d\nip: %lu\n\n", nw.sd, nw.h_port, nw.h_ip);
}
|
critical.c | /* PMSIS includes */
#include "pmsis.h"
#include "omp.h"
#define ARRAY_SIZE 512
uint32_t a[ARRAY_SIZE] = {0};
uint32_t b[ARRAY_SIZE] = {0};
uint32_t c[ARRAY_SIZE] = {0};
/* Cluster main entry, executed by core 0. */
/* Cluster main entry, executed by core 0. Demonstrates OpenMP worksharing,
 * barriers and a critical section on the PMSIS cluster: fills a[] and b[],
 * sums them into c[], then each core serially accumulates and mutates c[]
 * inside the critical region. */
void cluster_delegate(void *arg)
{
    printf("Cluster master core entry\n");
    #pragma omp parallel
    {
        printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() );
        /* phase 1: initialize the input arrays, iterations split across cores */
        #pragma omp for
        for (int i=0; i<ARRAY_SIZE; i++)
        {
            a[i] = 2 * i;
            b[i] = 3 * i;
        }
        /* busy-wait scaled by thread id so cores reach the barrier staggered */
        for(volatile int i = 0; i < (10000 << omp_get_thread_num()); i++);
        #pragma omp barrier
        /* phase 2: element-wise sum; requires phase 1 complete on all cores */
        #pragma omp for
        for (int i=0; i<ARRAY_SIZE; i++)
        {
            c[i] = a[i] + b[i];
            printf("[%d %d] c[%d]: %d\n", pi_cluster_id(), omp_get_thread_num(), i, c[i]);
        }
        #pragma omp barrier
        /* phase 3: one core at a time sums c[] and then mutates it, so each
           core observes (and prints) a different running sum */
        #pragma omp critical
        {
            uint32_t sum = 0;
            for (int i=0; i<ARRAY_SIZE; i++)
            {
                sum += c[i];
                c[i] += i;
            }
            printf("Core sum %d: %d\n", pi_core_id(), sum);
        }
    }
    printf("Cluster master core exit\n");
}
/* Fabric-controller entry: opens the cluster, dispatches cluster_delegate()
 * as a cluster task, then closes the cluster and exits through pmsis_exit. */
void helloworld(void)
{
    printf("Entering main controller\n");
    uint32_t errors = 0;
    uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
    printf("[%d %d] Hello World!\n", cluster_id, core_id);
    struct pi_device cluster_dev = {0};
    struct pi_cluster_conf cl_conf = {0};
    /* Init cluster configuration structure. */
    pi_cluster_conf_init(&cl_conf);
    cl_conf.id = 0;                /* Set cluster ID. */
    /* Configure & open cluster. */
    pi_open_from_conf(&cluster_dev, &cl_conf);
    if (pi_cluster_open(&cluster_dev))
    {
        printf("Cluster open failed !\n");
        pmsis_exit(-1);
    }
    /* Prepare cluster task and send it to cluster; this call blocks until
       the cluster task returns. */
    struct pi_cluster_task cl_task = {0};
    cl_task.entry = cluster_delegate;
    cl_task.arg = NULL;
    pi_cluster_send_task_to_cl(&cluster_dev, &cl_task);
    pi_cluster_close(&cluster_dev);
    printf("Test success !\n");
    pmsis_exit(errors);
}
/* Program Entry. */
int main(void)
{
    /* Print a banner, then hand helloworld() to the PMSIS runtime. */
    printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
    int status = pmsis_kickoff((void *) helloworld);
    return status;
}
|
pr43893.c | /* PR c/43893 */
/* { dg-do run } */
extern void abort (void);
int
main ()
{
/* PR c/43893 regression test: each parallelized loop below is constructed
   to iterate exactly once with its bounds at an extreme of the induction
   variable's type; the reduction must still observe exactly one increment.
   The exact loop forms are the point of the test — do not simplify them. */
int c;
unsigned int i;
int j;
/* unsigned, single iteration via strict < */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 0; i < 1; i++)
c++;
if (c != 1)
abort ();
/* unsigned, single iteration via <= 0 */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 0; i <= 0; i++)
c++;
if (c != 1)
abort ();
/* signed, single iteration starting at INT_MIN (strict <) */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = - __INT_MAX__ - 1; j < - __INT_MAX__; j++)
c++;
if (c != 1)
abort ();
/* signed, single iteration at INT_MIN via <= */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = - __INT_MAX__ - 1; j <= - __INT_MAX__ - 1; j++)
c++;
if (c != 1)
abort ();
/* unsigned, counting down from UINT_MAX (strict >) */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 2U * __INT_MAX__ + 1; i > 2U * __INT_MAX__; i--)
c++;
if (c != 1)
abort ();
/* unsigned, counting down from UINT_MAX via >= */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 2U * __INT_MAX__ + 1; i >= 2U * __INT_MAX__ + 1; i--)
c++;
if (c != 1)
abort ();
/* signed, counting down from INT_MAX (strict >) */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = __INT_MAX__; j > __INT_MAX__ - 1; j--)
c++;
if (c != 1)
abort ();
/* signed, counting down from INT_MAX via >= */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = __INT_MAX__; j >= __INT_MAX__; j--)
c++;
if (c != 1)
abort ();
return 0;
}
|
fib.c | #include <stdio.h>
#define N 42
/* Fibonacci of n using OpenMP tasks.
 * Small subproblems (n < 30) recurse serially; larger ones spawn the two
 * child calls as tasks and join at the taskwait. Compiled without OpenMP,
 * the pragmas are ignored and the function is plain serial recursion. */
long fib(long n) {
    if (n < 2) {
        return n;
    }
    if (n < 30) {
        /* Serial cutoff: task overhead would dominate below this size. */
        return fib(n - 1) + fib(n - 2);
    }
    long left, right;
    #pragma omp task shared(left)
    left = fib(n - 1);
    #pragma omp task shared(right)
    right = fib(n - 2);
    /* Join: both child tasks must complete before summing. */
    #pragma omp taskwait
    return left + right;
}
/* Program entry: compute and print Fibonacci(N) inside a parallel region.
 * 'single' ensures only one thread starts the top-level call; child tasks
 * created inside fib() fan out to the team.
 * Fix: the arguments are signed long, so the conversion must be %ld —
 * the original's %lu is the wrong specifier for signed long (undefined
 * behavior per C11 7.21.6.1, though benign here since values are positive). */
int main() {
    #pragma omp parallel
    #pragma omp single
    printf("\nFibonacci(%ld) = %ld\n", (long)N, fib((long)N));
    return 0;
}
|
GB_binop__plus_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp64)
// A*D function (colscale): GB (_AxD__plus_fp64)
// D*A function (rowscale): GB (_DxB__plus_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp64)
// C=scalar+B GB (_bind1st__plus_fp64)
// C=scalar+B' GB (_bind1st_tran__plus_fp64)
// C=A+scalar GB (_bind2nd__plus_fp64)
// C=A'+scalar GB (_bind2nd_tran__plus_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FP64 || GxB_NO_PLUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the loop body comes from the
// included template, specialized by the file-scope GB_* macros for
// double z = x + y.
// NOTE(review): unlike the other kernels here there is no GB_DISABLE guard —
// presumably the caller checks it; confirm before relying on this path.
void GB (_Cdense_ewise3_accum__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense. Returns GrB_NO_VALUE when this
// specialization is compiled out via GB_DISABLE (caller falls back to the
// generic kernel).
GrB_Info GB (_Cdense_ewise3_noaccum__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed slicing of B's entries across B_ntasks tasks / B_nthreads
// threads. Body comes from the dense subassign (method 23) template.
GrB_Info GB (_Cdense_accumB__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as an untyped pointer, read here as
// double) into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__plus_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns; this duplicate
// return is a harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry of D.
// C->x is written directly; the loop comes from the colscale template.
GrB_Info GB (_AxD__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
// Mirror of _AxD above, using the rowscale template.
GrB_Info GB (_DxB__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns). The C_to_* maps
// relate C's vectors to those of M, A and B; TaskList describes the parallel
// decomposition into C_ntasks tasks on C_nthreads threads.
GrB_Info GB (_AaddB__plus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scratch slicing workspace for M, A and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B (set intersection of
// patterns), via the emult_01 meta template.
GrB_Info GB (_AemultB_01__plus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. Since GB_BINOP_FLIP is 0 for this operator (plus is
// commutative — see the macro above), only the non-flipped template branch
// is compiled in.
GrB_Info GB (_AemultB_02__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; the mask's slicing drives the parallel decomposition.
GrB_Info GB (_AemultB_03__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__plus_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = x + Bx [p] for every present entry p, with the scalar x
// bound as the first operand. Entries absent per the bitmap Bb (GBB test)
// are skipped. Cx and Bx may alias (pure elementwise, safe in place).
GrB_Info GB (_bind1st__plus_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = Ax [p] + y for every present entry p, with the scalar y
// bound as the second operand. Mirror of _bind1st above.
GrB_Info GB (_bind2nd__plus_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply z = x + aij with x bound first.
GrB_Info GB (_bind1st_tran__plus_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): the redefinition below is dead as executable code (both
// branches above return) — it is a generator artifact that restores the
// file-scope GB_ATYPE definition for any later preprocessing.
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply z = aij + y with y bound second.
GrB_Info GB (_bind2nd_tran__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
#define TINYEXR_X86_OR_X64_CPU 1
#else
#define TINYEXR_X86_OR_X64_CPU 0
#endif
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
#define TINYEXR_LITTLE_ENDIAN 1
#else
#define TINYEXR_LITTLE_ENDIAN 0
#endif
// Use miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
// Decoded EXR version-header fields (see ParseEXRVersionFrom* below).
typedef struct _EXRVersion {
int version; // this must be 2
// tile format image;
// not zero for only a single-part "normal" tiled file (according to spec.)
int tiled;
int long_name; // long name attribute
// deep image(EXR 2.0);
// for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image;
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
// One named, typed attribute blob from an EXR header.
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*; 'size' bytes
int size;
int pad0;
} EXRAttribute;
// Per-channel description from the header's `channels` attribute.
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type; // presumably TINYEXR_PIXELTYPE_* — confirm against parser
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
// A single decoded tile and its position/level within a tiled image.
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
// 2D integer box (min/max corners); used for data/display windows.
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
// Parsed single-part EXR header: required attributes as typed fields,
// everything else in custom_attributes.
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
// for a single-part file, agree with the version field bit 11
// for a multi-part file, it is consistent with the type of part
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes(excludes required attributes(e.g. `channels`,
// `compression`, etc)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
// name attribute required for multipart files;
// must be unique and non empty (according to spec.);
// use EXRSetNameAttr for setting value;
// max 255 character allowed - excluding terminating zero
char name[256];
} EXRHeader;
// All part headers of a multi-part file.
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
// Decoded pixel data for one image (scanline or tiled; one level of a chain).
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
struct _EXRImage* next_level; // NULL if scanline format or image is the last level.
int level_x; // x level index
int level_y; // y level index
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
// All part images of a multi-part file.
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
// Decoded deep-image data (variable sample count per pixel).
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba` Result image format is: float x RGBA x
// width x height Returns negative value and may set error string in `err` when
// there's an error When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#include <miniz.h>
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Stores a heap-allocated copy of `msg` in *err. The caller owns the
// copy (the public API frees it via FreeEXRErrorMessage()).
// Does nothing when `err` is NULL.
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (!err) {
    return;
  }
#ifdef _WIN32
  (*err) = _strdup(msg.c_str());
#else
  (*err) = strdup(msg.c_str());
#endif
}
// Stores a heap-allocated copy of `msg` in *warn; the caller owns the
// returned copy. Does nothing when `warn` is NULL.
static void SetWarningMessage(const std::string &msg, const char **warn) {
  if (!warn) {
    return;
  }
#ifdef _WIN32
  (*warn) = _strdup(msg.c_str());
#else
  (*warn) = strdup(msg.c_str());
#endif
}
static const int kEXRVersionSize = 8;

// Copies a 16-bit value byte-by-byte, so neither pointer needs to be
// aligned.
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 2; ++i) {
    d[i] = s[i];
  }
}
// Byte-swaps a 16-bit value in place on big-endian hosts; compiles to a
// no-op on little-endian hosts (EXR files store data little-endian).
static void swap2(unsigned short *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned short tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[1];
  dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// Alignment-safe 32-bit copies: move the value one byte at a time so
// misaligned source/destination pointers are fine.
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) {
    d[i] = s[i];
  }
}
// Unsigned-int overload.
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) {
    d[i] = s[i];
  }
}
// Float overload (copies the raw representation).
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) {
    d[i] = s[i];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// In-place byte swap of a 32-bit value; compiles to a no-op on
// little-endian hosts (EXR data is stored little-endian on disk).
static void swap4(unsigned int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}
// Signed-int overload of the 32-bit byte swap.
static void swap4(int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}
// Float overload: swaps the raw bytes of the representation.
static void swap4(float *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  float tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
// In-place byte swap of a 64-bit value; no-op on little-endian hosts.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// A 32-bit IEEE-754 float viewed either as raw bits (`u`), as a float
// (`f`), or as sign/exponent/mantissa bit-fields (`s`). Field order in
// the struct depends on host endianness so the fields line up with the
// raw-bit view.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// A 16-bit IEEE-754 half-precision value viewed either as raw bits
// (`u`) or as sign/exponent/mantissa bit-fields (`s`).
union FP16 {
  unsigned short u;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Converts an IEEE half-precision value to single precision.
// Handles Inf/NaN (exponent field all ones) and half denormals, which
// are renormalized via the "magic" constant subtraction trick.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust
  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }
  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}
// Converts a single-precision float to half precision.
// Float denormals map to (signed) zero, Inf stays Inf, NaN is forced to
// a quiet NaN, values too large for half become signed infinity, and
// dropped mantissa bits round up when their top bit is set (a carry
// into the exponent field is intentional and correct).
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};
  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Reads a NUL-terminated string of at most `len` bytes starting at
// `ptr`. On success stores the string (without the terminator) in *s
// and returns a pointer just past the '\0'. If no terminator is found
// within `len` bytes, clears *s and returns NULL.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  size_t i = 0;
  while (i < len && ptr[i] != 0) {
    i++;
  }
  if (i >= len) {
    // Ran off the end without seeing a terminator.
    s->clear();
    return NULL;
  }
  *s = std::string(ptr, ptr + i);
  return ptr + i + 1;  // skip '\0'
}
// Parses one header attribute from `marker` (at most `size` bytes).
// Wire layout: name '\0' type '\0' uint32 data_len (little-endian),
// then data_len raw bytes. On success fills name/type/data, sets
// *marker_size to the number of bytes consumed, and returns true.
// Returns false for truncated or malformed input.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);
  marker += name_len + 1;
  size -= name_len + 1;
  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    // Type string is unterminated within the remaining bytes.
    return false;
  }
  *type = std::string(marker, type_len);
  marker += type_len + 1;
  size -= type_len + 1;
  if (size < sizeof(uint32_t)) {
    return false;
  }
  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      // Store a single NUL so callers can treat the payload as a C string.
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    } else {
      // Zero-length data is only meaningful for string attributes.
      return false;
    }
  }
  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);
  if (size < data_len) {
    return false;
  }
  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}
// Serializes one attribute (name '\0' type '\0' int len, then `len` raw
// bytes) and appends it to `out`.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);
  // Write the length in file byte order (swap4 is a no-op on
  // little-endian hosts).
  int outLen = len;
  tinyexr::swap4(&outLen);
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}
// Description of a single image channel, as parsed from / written to
// the `chlist` header attribute (see ReadChannelInfo/WriteChannelInfo).
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;           // pixel type stored in the file
  int requested_pixel_type; // pixel type to use when writing (this is
                            // the value WriteChannelInfo serializes)
  int x_sampling;           // horizontal subsampling factor
  int y_sampling;           // vertical subsampling factor
  unsigned char p_linear;   // `pLinear` flag byte from the channel record
  unsigned char pad[3];     // explicit struct padding
} ChannelInfo;
// Integer box (EXR `box2i` attribute value): min/max corner coordinates.
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;
// Parsed contents of one EXR part header: channel list, raw attributes,
// windows, compression settings and (for tiled parts) tile parameters.
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;
  Box2iInfo data_window;
  int line_order;
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;
  int chunk_count;
  // Tiled format
  int tiled;  // Non-zero if the part is tiled.
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  unsigned int header_len;
  int compression_type;
  // required for multi-part or non-image files
  std::string name;
  // required for multi-part or non-image files
  std::string type;
  // Resets every field to its default/zero state.
  void clear() {
    channels.clear();
    attributes.clear();
    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;
    chunk_count = 0;
    // Tiled format
    tiled = 0;
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;
    header_len = 0;
    compression_type = 0;
    name.clear();
    type.clear();
  }
};
// Parses the value of a `chlist` attribute: a sequence of per-channel
// records (name '\0', int pixel_type, uchar pLinear + 3 reserved bytes,
// int x_sampling, int y_sampling) terminated by a single '\0'.
// Returns false when the data is truncated (buffer-overrun guard,
// Issue #51).
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));
  for (;;) {
    if ((*p) == 0) {
      // End-of-list terminator.
      break;
    }
    ChannelInfo info;
    // Bytes remaining between `p` and the end of `data`.
    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }
    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }
    // The fixed-size tail of the record is 16 bytes; require it (plus at
    // least one byte for the list terminator) to fit in the buffer.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }
    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;
    // Values are stored little-endian in the file; swap on big-endian hosts.
    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);
    channels.push_back(info);
  }
  return true;
}
// Serializes a channel list into `chlist` layout: per channel,
// name '\0' + pixel_type + pLinear (+3 zero reserved bytes) +
// x_sampling + y_sampling; the list ends with a single '\0'.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += channels[c].name.length() + 1;  // +1 for \0
    sz += 16;                             // 4 * int
  }
  data.resize(sz + 1);
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), channels[c].name.length());
    p += channels[c].name.length();
    (*p) = '\0';
    p++;
    // The file stores the *requested* pixel type (the type the data will
    // be written as), not the in-memory pixel type.
    int pixel_type = channels[c].requested_pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);
    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].p_linear;
    // Skip the 3 reserved bytes; they are already zero because
    // std::vector::resize value-initializes new elements.
    p += 4;
    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0';
}
// Compresses `src` (src_size bytes) into `dst` using the EXR ZIP
// scheme: bytes are de-interleaved into two halves and delta-encoded
// (as in OpenEXR's ImfZipCompressor.cpp), then deflated.
// `compressedSize` receives the number of bytes written. If deflate
// does not shrink the data, the raw input is stored instead and
// compressedSize == src_size (Issue 40); DecompressZip detects this
// case by comparing sizes.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data: even-index bytes go to the first half of
  // tmpBuf, odd-index bytes to the second half.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: replace each byte with a biased difference from its
  // predecessor so runs of similar bytes deflate better.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  mz_ulong outSize = mz_compressBound(src_size);
  int ret = mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == MZ_OK);
  (void)ret;
  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  // Consistent with the miniz branch above: avoids an unused-variable
  // warning when NDEBUG compiles the assert away.
  (void)ret;
  compressedSize = outSize;
#endif
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inflates an EXR ZIP-compressed block into `dst` and undoes the
// delta/interleave preprocessing. *uncompressed_size is the expected
// output size (updated in place by the inflate call). When src_size
// equals the expected size the block was stored raw (Issue 40) and is
// copied through unchanged. Returns false when inflation fails.
// NOTE(review): a zero *uncompressed_size with nonzero src_size would
// make tmpBuf.at(0) throw; callers appear to pass nonzero sizes —
// verify at call sites.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
  int ret =
      mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor: undo the delta encoding (running sum of differences).
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave the two buffer halves.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;    // shortest repeat worth encoding as a run
const int MAX_RUN_LENGTH = 127;  // largest count a single code byte can hold
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;
  while (runStart < inEnd) {
    // Extend the run while consecutive bytes repeat (capped so the
    // count fits in a code byte).
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }
    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run: emit (count - 1) followed by the repeated byte.
      //
      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run: emit a negative length followed by the
      // literal bytes. Stop extending as soon as a 3-byte repeat begins.
      //
      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }
      *outWrite++ = static_cast<char>(runStart - runEnd);
      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }
    ++runEnd;
  }
  return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
// Decodes rleCompress() output: a negative code byte is followed by
// -code literal bytes; a non-negative code byte is followed by one byte
// that is repeated code+1 times. Returns 0 when the output would exceed
// maxLength or the input is truncated.
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;
  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;
      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      inLength -= 2;
      if (0 > (maxLength -= count + 1)) return 0;
      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;
      in++;
    }
  }
  return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// Compresses `src` using the EXR RLE scheme: the same de-interleave +
// delta preprocessing as CompressZip, then run-length encoding.
// If RLE does not shrink the data, the raw input is stored instead and
// compressedSize == src_size (Issue 40).
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  //
  // Reorder the pixel data: even-index bytes to the first half of
  // tmpBuf, odd-index bytes to the second half.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: store biased differences between adjacent bytes so runs
  // of similar values become repeated bytes.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);
  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Decodes an EXR RLE-compressed block into `dst` and undoes the
// delta/interleave preprocessing. When src_size equals the expected
// uncompressed size the block was stored raw (Issue 40) and is copied
// through. Returns false on malformed input.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }
  std::vector<unsigned char> tmpBuf(uncompressed_size);
  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  // The decode must produce exactly the expected number of bytes.
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  // Predictor: undo the delta encoding (running sum of differences).
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave the two buffer halves.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel view into the PIZ codec's working buffer of 16-bit
// samples (layout mirrors OpenEXR's ImfPizCompressor.cpp; semantics of
// the fields follow that reference implementation).
struct PIZChannelData {
  unsigned short *start;  // first sample of this channel in the buffer
  unsigned short *end;    // running cursor, advanced during (de)coding
  int nx;                 // samples per row
  int ny;                 // number of rows
  int ys;                 // y sampling factor
  int size;               // 16-bit words per pixel sample
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// 14-bit wavelet encode step: writes the truncated average of (a, b)
// into `l` and their difference into `h`. Inputs are reinterpreted as
// signed 16-bit values first.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);
  l = static_cast<unsigned short>(static_cast<short>((sa + sb) >> 1));
  h = static_cast<unsigned short>(static_cast<short>(sa - sb));
}
// 14-bit wavelet decode step: exact inverse of wenc14, recovering the
// original pair (a, b) from average `l` and difference `h`.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = static_cast<short>(l);  // sign-extended average
  const int d = static_cast<short>(h);  // sign-extended difference
  const int ai = m + (d & 1) + (d >> 1);
  a = static_cast<unsigned short>(static_cast<short>(ai));
  b = static_cast<unsigned short>(static_cast<short>(ai - d));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
// 16-bit wavelet encode step using modulo arithmetic: works for the
// full 16-bit range (unlike wenc14) at some cost in compressibility.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const int ao = (a + A_OFFSET) & MOD_MASK;
  int m = (ao + b) >> 1;
  int d = ao - b;
  if (d < 0) {
    m = (m + M_OFFSET) & MOD_MASK;
  }
  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d & MOD_MASK);
}
// Exact inverse of wenc16: recovers (a, b) from (l, h) modulo 2^16.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = l;
  const int d = h;
  const int bb = (m - (d >> 1)) & MOD_MASK;
  const int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
// Forward 2D wavelet transform over a strided 2D array, in place.
// Uses the exact 14-bit basis (wenc14) when every value is below 2^14
// (per `mx`), otherwise the modulo 16-bit basis (wenc16). Processing is
// hierarchical: each pass halves the resolution until the smaller
// dimension is exhausted; odd trailing columns/rows get a 1D step.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet encoding
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }
      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// Inverse 2D wavelet transform, in place; exact inverse of wav2Encode
// for the same (nx, ox, ny, oy, mx) arguments. First searches for the
// coarsest level, then walks the hierarchy from coarse to fine.
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n) p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Huffman coding parameters for the PIZ codec.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;  // mask for the low HUF_DECBITS bits
// One slot of the Huffman decoding hash table, indexed by the next
// HUF_DECBITS bits of the input stream.
//
// Short codes (length <= HUF_DECBITS) decode with a single lookup:
// `len` holds the code length and `lit` the decoded symbol.  Long codes
// share a slot: `len` is 0 and `p` points to an array of `lit` candidate
// symbol indices that hufDecode() matches against the encoding table.
struct HufDec {  // short code    long code
  //-------------------------------
  unsigned int len : 8;   // code length   0
  unsigned int lit : 24;  // lit           p size
  unsigned int *p;        // 0             lits
};
// Extract the bit length from a packed (code, length) pair (low 6 bits).
inline long long hufLength(long long code) {
  return code & 0x3f;
}
// Extract the code value from a packed (code, length) pair (bits 6..63).
inline long long hufCode(long long code) {
  return code >> 6;
}
// Append the low `nBits` of `bits` to the bit accumulator `c` (holding
// `lc` valid bits), flushing complete bytes to `out` MSB-first.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;

  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Read `nBits` bits (MSB-first) from the byte stream `in`, using `c`/`lc`
// as the bit accumulator; returns them as an unsigned value.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  // Refill the accumulator one byte at a time until it holds enough bits.
  while (lc < nBits) {
    c = (c << 8) | static_cast<long long>(static_cast<unsigned char>(*in++));
    lc += 8;
  }

  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Turn an array of per-symbol code lengths into a canonical Huffman code
// table: each non-zero entry becomes (code << 6) | length.  Shorter codes,
// zero-filled to the right, compare numerically higher than longer codes,
// and codes of equal length increase with the symbol value.
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  // Histogram of code lengths; valid lengths are 1..58 (0 == unused).
  long long numCodes[59];

  for (int i = 0; i <= 58; ++i) numCodes[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i) numCodes[hcode[i]] += 1;

  // Working longest-to-shortest, compute the numerically lowest code
  // value for each length and stash it back into numCodes[].
  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long next = (c + numCodes[i]) >> 1;
    numCodes[i] = c;
    c = next;
  }

  // Replace each stored length l with its packed (code, length) pair,
  // handing out consecutive code values within each length class.
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);
    if (l > 0) hcode[i] = l | (numCodes[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Orders heap entries by the pointed-to frequency value so that
// std::make_heap / pop_heap / push_heap behave as a *min*-heap
// (least frequent symbol on top).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         //  o: min frq index
    int *iM)         //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);

  *im = 0;

  // NOTE(review): assumes at least one non-zero entry in frq; an all-zero
  // array would make this scan read past the end of the array.
  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
// Packed code-length stream format: lengths 0..58 are stored verbatim in
// 6 bits; values 59..62 encode short runs of 2..5 zero lengths; value 63
// is followed by an 8-bit count and encodes runs of 6..261 zeroes.
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
// Serialize the code lengths of hcode[im..iM] into *pcode, 6 bits per
// length, with runs of zero lengths collapsed per the zero-run scheme
// described above.  Only lengths are stored; the canonical codes are
// reconstructed on the decoding side.
static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            // o: ptr to packed table (updated)
{
  char *out = *pcode;
  long long c = 0;
  int lc = 0;

  while (im <= iM) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Count the run of zero-length (unused) codes starting at im.
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // Long run: marker + 8-bit residual count.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, out);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, out);
        } else {
          // Short run: the run length is folded into the 6-bit value.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, out);
        }
        im++;
        continue;
      }
    }

    // Single length (possibly a lone zero), stored verbatim.
    outputBits(6, l, c, lc, out);
    im++;
  }

  // Flush any bits still in the accumulator, left-aligned in a final byte.
  if (lc > 0) *out++ = (unsigned char)(c << (8 - lc));

  *pcode = out;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
// Unpack an encoding table packed by hufPackEncTable() and rebuild the
// canonical codes from the lengths.  Returns false when the packed data
// is truncated or a zero run would overflow the [im, iM] range.
static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    // Reject reads past the end of the packed table.
    if (p - *pcode >= ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      // Long zero run: an 8-bit residual count follows the marker.
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run: the 6-bit value itself encodes 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  // Convert the unpacked lengths into canonical (code, length) pairs.
  hufCanonicalCodeTable(hcode);

  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
// Reset every slot of a freshly allocated decoding table: no short code,
// no long-code candidate list.
static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              // decoding table [HUF_DECSIZE]
{
  for (HufDec *pl = hdecod; pl < hdecod + HUF_DECSIZE; ++pl) {
    pl->len = 0;
    pl->lit = 0;
    pl->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
// Build the decoding hash table `hdecod` from the canonical encoding
// table.  Returns false when the encoding table is inconsistent
// (overlong code values or overlapping slots), which indicates corrupt
// input; the caller must still run hufFreeDecTable() afterwards because
// long-code entries may already have been allocated.
static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        return false;
      }

      // Grow the slot's candidate list by one (copy-and-replace).
      pl->lit++;

      if (pl->p) {
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries whose top l bits equal c.
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
// Release the long-code candidate lists allocated by hufBuildDecTable().
// delete[] on a null pointer is a no-op, so no guard is needed.
static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    delete[] hdecod[i].p;
    hdecod[i].p = NULL;
  }
}
//
// ENCODING
//
// Write one Huffman code, stored as a packed (code, length) pair, to the
// output bit stream.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}
// Emit the symbol whose code is sCode a total of (runCount + 1) times:
// either explicitly, or — when shorter — as sCode once followed by the
// run-length code and runCount as an 8-bit repeat count (the decoder
// expands runCode into runCount copies of the previous symbol).
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // `>= 0` is deliberate: this emits runCount + 1 codes in total.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
// Encode (compress) ni values with the Huffman table hcode, run-length
// encoding repeated values via the pseudo-symbol rlc.  Returns the output
// size in bits.
static int hufEncode                // return: output size (in bits)
    (const long long *hcode,        // i : encoding table
     const unsigned short *in,      // i : uncompressed input buffer
     const int ni,                  // i : number of input values
     int rlc,                       // i : run-length code
     char *out)                     // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bit accumulator (bits not yet written to out)
  int lc = 0;       // number of valid bits in c (LSB)

  int cur = in[0];  // symbol of the current run
  int run = 0;      // extra repetitions of cur seen so far (capped at 255)

  //
  // Group equal neighbouring values into runs and emit each run.
  //
  for (int i = 1; i < ni; i++) {
    if (cur == in[i] && run < 255) {
      run++;
    } else {
      sendCode(hcode[cur], run, hcode[rlc], c, lc, out);
      run = 0;
    }

    cur = in[i];
  }

  //
  // Flush the final run and any bits still in the accumulator.
  //
  sendCode(hcode[cur], run, hcode[rlc], c, lc, out);

  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// Append the next input byte to the bit accumulator `c` and advance the
// valid-bit count `lc` (macro rather than a function to force inlining).
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
// Emit one decoded symbol (or expand a run) into the output buffer.
// `po` is the decoded symbol; if it equals the run-length code `rlc`,
// the next 8 input bits give a repeat count and the *previous* output
// value is replicated that many times.  Returns false on malformed or
// truncated input (any bounds violation).
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;

  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      /* TinyEXR issue 160. in + 1 -> in */
      if (in >= in_end) {
        return false;
      }

      getChar(c, lc, in);
    }

    lc -= 8;

    unsigned char cs = (c >> lc);  // repeat count for the run

    // The expanded run must fit in the output buffer.
    if (out + cs > oe) return false;

    // Bounds check for safety
    // Issue 100: a run cannot be the very first output value, because it
    // replicates out[-1].
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];

    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
// Decode `ni` bits of Huffman data into exactly `no` output values.
// Returns false on malformed input: unknown code, output overrun, or a
// final output length different from `no`.
static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output count (in shorts;
                               //     `out + no` bounds the output buffer)
                      unsigned short *out)     // o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;

        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        // Slot has no short code; without a candidate list either, the
        // bit pattern is not a valid code.
        if (!pl.p) {
          return false;
        }

        //
        // Search long code: try each candidate symbol stored in the slot.
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;  // no candidate matched: invalid code
        }
      }
    }
  }

  //
  // Get remaining (short) codes: drop the padding bits of the final byte
  // (ni is a bit count, so the last byte may hold up to 7 unused bits).
  //
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;  // trailing bits must form a short code
    }
  }

  // The stream must decode to exactly the expected number of values.
  if (out - outb != no) {
    return false;
  }

  return true;
}
// Build the symbol histogram used to construct the Huffman table:
// freq[s] = number of occurrences of 16-bit symbol s in data[0..n).
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  // Reset one slot per possible 16-bit symbol (plus the pseudo-symbol).
  for (int s = 0; s < HUF_ENCSIZE; ++s) {
    freq[s] = 0;
  }

  // Tally every input value.
  for (int i = 0; i < n; ++i) {
    freq[data[i]] += 1;
  }
}
// Serialize `i` into buf[0..3] in little-endian byte order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  b[0] = (unsigned char)(i & 0xffu);
  b[1] = (unsigned char)((i >> 8) & 0xffu);
  b[2] = (unsigned char)((i >> 16) & 0xffu);
  b[3] = (unsigned char)((i >> 24) & 0xffu);
}
// Deserialize 4 little-endian bytes from buf into an unsigned int.
//
// Fix: the old code computed (b[3] << 24) on an int-promoted byte; for
// b[3] >= 0x80 that left-shifts into the sign bit of a signed int, which
// is undefined behavior.  Converting each byte to unsigned int *before*
// shifting makes every shift well-defined and yields the same value.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
// Huffman-compress nRaw 16-bit values into `compressed` and return the
// total byte count written.  Stream layout:
//   [20-byte header: im, iM, tableLength, nBits, reserved]
//   [packed code-length table][encoded bit stream]
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  // Build the encoding table from the symbol frequencies.
  std::vector<long long> freq(HUF_ENCSIZE);
  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);

  // Pack the code-length table right after the fixed header.
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  const int tableLength = tableEnd - tableStart;

  // Encode the data; iM doubles as the run-length pseudo-symbol.
  char *dataStart = tableEnd;
  const int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  const int dataLength = (nBits + 7) / 8;

  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + dataLength - compressed;
}
// Decompress a Huffman stream produced by hufCompress() into *raw
// (raw->size() gives the expected number of output values).  Returns
// false on malformed or truncated input.
//
// Fix: the results of hufUnpackEncTable(), hufBuildDecTable() and
// hufDecode() were previously ignored, so corrupt input could be
// silently accepted and produce garbage output.  Their failures are now
// propagated (with the decoding table freed on every allocated path).
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // NOTE(review): empty input is rejected unconditionally (even when
    // raw->size() == 0); this preserves the original behavior.
    if (raw->size() != 0) return false;

    return false;
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;  // nothing allocated in hdec yet
    }

    // The encoded bit count must fit in the remaining input bytes.
    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;
    }

    if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
      // Long-code entries may already have been allocated.
      hufFreeDecTable(&hdec.at(0));
      return false;
    }

    bool ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                        static_cast<int>(raw->size()), raw->data());

    hufFreeDecTable(&hdec.at(0));
    return ok;
  }
}
//
// Functions to compress the range of values in the pixel data
//
// Range compression: one bitmap bit per possible 16-bit pixel value.
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
// Mark each distinct value occurring in data[] with one bit in bitmap[]
// and report the first/last non-zero bitmap bytes via minNonZero and
// maxNonZero (so only that slice needs to be stored in the file).
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    bitmap[i] = 0;
  }

  for (int i = 0; i < nData; ++i) {
    unsigned short v = data[i];
    bitmap[v >> 3] |= (1 << (v & 7));
  }

  // Zero is never stored explicitly in the bitmap; the data is assumed
  // to always contain zeroes.
  bitmap[0] &= ~1;

  // Locate the occupied range of the bitmap.
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}
// Build the forward range-compression LUT: every value present in the
// bitmap (value 0 is always treated as present) maps to the next
// consecutive index; absent values map to 0.  Returns the largest index
// handed out, i.e. the number of present values minus one.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int next = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    bool present = (i == 0) || ((bitmap[i >> 3] & (1 << (i & 7))) != 0);
    lut[i] = present ? next++ : 0;
  }

  return next - 1;  // maximum value stored in lut[]
}
// Build the inverse range-compression LUT: lut[k] is the k-th value whose
// bitmap bit is set (value 0 is always included).  Unused tail entries
// are cleared.  Returns the largest valid index k.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) {
      lut[k++] = i;
    }
  }

  int n = k - 1;

  // Zero out the remainder of the table.
  while (k < USHORT_RANGE) {
    lut[k++] = 0;
  }

  return n;  // maximum k where lut[k] is non-zero
}
// Remap every sample of data[] in place through the 64K-entry LUT.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int idx = 0; idx < nData; ++idx) {
    data[idx] = lut[data[idx]];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// Compress one block of scanlines with the PIZ scheme:
// range compression (bitmap + forward LUT), a 2D wavelet transform per
// channel, then Huffman coding.  Falls back to storing the raw input
// when the compressed result would be larger (Issue 40).
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));

  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  // Partition tmpBuffer into one contiguous region per channel.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];

    cd.start = tmpBufferEnd;
    cd.end = cd.start;

    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;

    // HALF occupies one 16-bit slot per pixel, UINT/FLOAT occupy two.
    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    cd.size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }

  // De-interleave the input scanlines into the per-channel regions.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //   continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  // Range compression: map the values actually present onto 0..maxValue.
  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);

  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));

  //
  // Store range compression info in _outBuffer:
  // [minNonZero][maxNonZero][occupied bitmap slice].
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);

  if (minNonZero <= maxNonZero) {
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }

  //
  // Apply wavelet encoding to each 16-bit plane of each channel.
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);

  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));

  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
// Decompress a PIZ-compressed scanline block into outPtr.
//
// Compressed layout: [minNonZero][maxNonZero][bitmap slice]
// [huffman length (4 bytes)][huffman data].  Returns false on
// malformed input.
//
// Fix: the result of hufUncompress() was previously ignored, so corrupt
// input could silently yield garbage pixel data; failures now abort.
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSizeInBytes, size_t inLen,
                          int num_channels, const EXRChannelInfo *channels,
                          int data_width, int num_lines) {
  if (inLen == tmpBufSizeInBytes) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }

  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  memset(bitmap.data(), 0, BITMAP_SIZE);

  const unsigned char *ptr = inPtr;
  // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;

  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }

  if (minNonZero <= maxNonZero) {
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }

  // Rebuild the reverse LUT used to expand range-compressed values.
  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());

  //
  // Huffman decoding
  //
  int length;
  // length = *(reinterpret_cast<const int *>(ptr));
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);

  // A negative `length` wraps to a huge size_t here and is rejected too.
  if (size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }

  std::vector<unsigned short> tmpBuffer(tmpBufSizeInBytes /
                                        sizeof(unsigned short));

  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer)) {
    return false;
  }

  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));

  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  // Partition tmpBuffer into one contiguous region per channel
  // (HALF uses one 16-bit slot per pixel, UINT/FLOAT use two).
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0),
           static_cast<int>(tmpBufSizeInBytes / sizeof(unsigned short)));

  // Interleave the per-channel scanlines back into the caller's buffer.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //   continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters controlling ZFP (de)compression.  Which field is consulted
// depends on `type` (TINYEXR_ZFP_COMPRESSIONTYPE_*): rate, precision or
// tolerance.
struct ZFPCompressionParam {
  double rate;
  unsigned int precision;
  unsigned int __pad0;  // explicit padding
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*
  unsigned int __pad1;  // explicit padding

  // Defaults to rate-based compression at 2.0 bits per value.
  ZFPCompressionParam() {
    type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
    rate = 2.0;
    precision = 0;
    tolerance = 0.0;
  }
};
// Extract the ZFP compression parameters from the EXR attribute list.
// Returns true and fills *param when the mandatory `zfpCompressionType`
// attribute and its matching value attribute are present and well-formed;
// otherwise returns false and (if err != NULL) appends a message.
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;

  // First locate the mandatory compression-type attribute.
  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      } else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }

  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }

  // Then fetch the value attribute that matches the selected type.
  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Fix: this previously assigned to param->rate (copy-paste bug),
        // leaving param->precision uninitialized for precision mode.
        param->precision = static_cast<unsigned int>(
            *(reinterpret_cast<int *>(attributes[i].value)));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  } else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }

  return false;
}
// Assume pixel format is FLOAT for all channels.
// Decompress a ZFP stream into `dst` (planar float layout: channel-major,
// decoded one 4x4 pixel block at a time).  Both dimensions must be
// multiples of 4.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          size_t num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size =
      size_t(dst_width) * size_t(dst_num_lines) * num_channels;

  // NOTE(review): `uncompressed_size` counts float elements while
  // `src_size` is in bytes, and decoding continues even after this
  // memcpy — confirm whether an early `return true` was intended here
  // (cf. the Issue 40 handling in the PIZ path).
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
    return false;
  }

  // Describe the data as one 2D float field with all channels stacked
  // vertically.
  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, static_cast<unsigned int>(dst_width),
                   static_cast<unsigned int>(dst_num_lines) *
                       static_cast<unsigned int>(num_channels));
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }

  // Copy the compressed bytes into a worst-case-sized working buffer and
  // attach it to the zfp stream.
  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = size_t(dst_width) * size_t(dst_num_lines);

  for (size_t c = 0; c < size_t(num_channels); c++) {
    // decompress 4x4 pixel block.
    for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
      for (size_t x = 0; x < size_t(dst_width); x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        // Scatter the block back into the planar destination image.
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}
// Assume pixel format is FLOAT for all channels.
// Compress planar float data with ZFP; *outBuf is resized to the
// worst-case size and *outSize receives the actual compressed byte
// count.  Both dimensions must be multiples of 4.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
                        unsigned int *outSize, const float *inPtr, int width,
                        int num_lines, int num_channels,
                        const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
    return false;
  }

  // create input array.
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, static_cast<unsigned int>(width),
                       static_cast<unsigned int>(num_lines * num_channels));

  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  outBuf->resize(buf_size);

  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);

  // `field` was only needed to size the output buffer; the blocks are
  // fed to the encoder manually below.
  zfp_field_free(field);

  size_t image_size = size_t(width) * size_t(num_lines);

  for (size_t c = 0; c < size_t(num_channels); c++) {
    // compress 4x4 pixel block.
    for (size_t y = 0; y < size_t(num_lines); y += 4) {
      for (size_t x = 0; x < size_t(width); x += 4) {
        // Gather one 4x4 block from the planar source image.
        float fblock[16];
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }

  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));

  // NOTE(review): `stream` is not stream_close()d here (unlike the
  // decompress path) — confirm against zfp's ownership rules; this may
  // leak the bitstream handle.
  zfp_stream_close(zfp);

  return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
// Scatters decompressed, scanline-interleaved channel data (`src`) into the
// per-channel output images, applying byte swapping and the requested
// HALF -> FLOAT conversion. Layout of `src` per scanline: all samples of
// channel 0, then channel 1, ..., then channel n-1, each `width` samples.
// `line_order` 0 writes scanlines top-down; otherwise bottom-up.
// Returns false on an unsupported channel pixel type.
static bool ScatterDecodedChannels(
    unsigned char **out_images, const int *requested_pixel_types,
    const unsigned char *src, int line_order, int width, int height,
    int x_stride, int line_no, int num_lines, size_t pixel_data_size,
    size_t num_channels, const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  for (size_t c = 0; c < num_channels; c++) {
    for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
      // Source scanline for this channel. NOTE: the decompressed buffer is
      // `width` pixels per scanline (an earlier version of the PIZ FLOAT
      // path wrongly used `x_stride` here).
      const unsigned char *line_src =
          src + v * pixel_data_size * static_cast<size_t>(width) +
          channel_offset_list[c] * static_cast<size_t>(width);

      // Destination scanline index honors lineOrder.
      size_t dst_line;
      if (line_order == 0) {
        dst_line = static_cast<size_t>(line_no) + v;
      } else {
        dst_line = static_cast<size_t>(height) - 1U -
                   (static_cast<size_t>(line_no) + v);
      }
      const size_t dst_offset = dst_line * static_cast<size_t>(x_stride);

      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        const unsigned short *line_ptr =
            reinterpret_cast<const unsigned short *>(line_src);
        if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
          unsigned short *image =
              reinterpret_cast<unsigned short **>(out_images)[c] + dst_offset;
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            // Use byte-wise copy to avoid unaligned memory access when
            // compiler's optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            image[u] = hf.u;
          }
        } else {  // HALF -> FLOAT
          float *image = reinterpret_cast<float **>(out_images)[c] + dst_offset;
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            tinyexr::FP32 f32 = half_to_float(hf);
            image[u] = f32.f;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        const unsigned int *line_ptr =
            reinterpret_cast<const unsigned int *>(line_src);
        unsigned int *image =
            reinterpret_cast<unsigned int **>(out_images)[c] + dst_offset;
        for (size_t u = 0; u < static_cast<size_t>(width); u++) {
          unsigned int val;
          tinyexr::cpy4(&val, line_ptr + u);
          tinyexr::swap4(&val);
          image[u] = val;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        const float *line_ptr = reinterpret_cast<const float *>(line_src);
        float *image = reinterpret_cast<float **>(out_images)[c] + dst_offset;
        for (size_t u = 0; u < static_cast<size_t>(width); u++) {
          float val;
          tinyexr::cpy4(&val, line_ptr + u);
          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
          image[u] = val;
        }
      } else {
        assert(0);
        return false;
      }
    }
  }
  return true;
}

// Decodes one chunk of pixel data (`data_ptr`/`data_len`) into the
// per-channel output images.
// - `width`/`num_lines`: dimensions of this chunk; `x_stride`: output row
//   pitch in pixels; `line_no`: first output scanline for compressed data;
//   `y`: first output scanline for uncompressed (NONE) data.
// - `requested_pixel_types[c]` selects optional HALF -> FLOAT conversion.
// Returns false on invalid/corrupt input or unsupported configuration.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original (uncompressed) data size. Each factor is widened to
    // size_t before multiplying to avoid int overflow.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);
    if (!ret) {
      return false;
    }

    // For PIZ_COMPRESSION the decompressed layout is channel-interleaved per
    // scanline (ch0 line0, ch1 line0, ..., ch0 line1, ...).
    if (!ScatterDecodedChannels(out_images, requested_pixel_types,
                                &outBuf.at(0), line_order, width, height,
                                x_stride, line_no, num_lines, pixel_data_size,
                                num_channels, channels, channel_offset_list)) {
      return false;
    }
#else
    assert(0 && "PIZ is disabled in this build");
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For ZIP_COMPRESSION the decompressed layout is channel-interleaved per
    // scanline (same as PIZ).
    if (!ScatterDecodedChannels(out_images, requested_pixel_types,
                                &outBuf.at(0), line_order, width, height,
                                x_stride, line_no, num_lines, pixel_data_size,
                                num_channels, channels, channel_offset_list)) {
      return false;
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }
    if (!tinyexr::DecompressRle(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For RLE_COMPRESSION the decompressed layout is channel-interleaved per
    // scanline (same as PIZ/ZIP).
    if (!ScatterDecodedChannels(out_images, requested_pixel_types,
                                &outBuf.at(0), line_order, width, height,
                                x_stride, line_no, num_lines, pixel_data_size,
                                num_channels, channels, channel_offset_list)) {
      return false;
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    std::string e;
    if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
                                          int(num_attributes), &e)) {
      // This code path should not be reachable.
      assert(0);
      return false;
    }

    // ZFP in this reader supports FLOAT channels only.
    for (size_t c = 0; c < num_channels; c++) {
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type != TINYEXR_PIXELTYPE_FLOAT) {
        return false;
      }
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    if (outBuf.empty()) {
      return false;
    }
    if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)),
                                width, num_lines, num_channels, data_ptr,
                                static_cast<unsigned long>(data_len),
                                zfp_compression_param)) {
      return false;
    }

    // For ZFP_COMPRESSION the decompressed layout is channel-interleaved per
    // scanline (same as PIZ/ZIP/RLE); all channels are FLOAT.
    if (!ScatterDecodedChannels(out_images, requested_pixel_types,
                                &outBuf.at(0), line_order, width, height,
                                x_stride, line_no, num_lines, pixel_data_size,
                                num_channels, channels, channel_offset_list)) {
      return false;
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // Uncompressed: read channel scanlines directly from data_ptr. Uses `y`
    // (not `line_no`) as the first destination scanline.
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));

          // Fix: bounds check applied to both HALF paths (previously only
          // the HALF -> FLOAT path was checked).
          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              // address may not be aligned. use byte-wise copy for safety.#76
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              tinyexr::FP32 f32 = half_to_float(hf);
              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          // Fix: whole-scanline bounds check (the previous per-element check
          // `line_ptr + u >= end` still allowed a partial 4-byte read at the
          // end of the buffer).
          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Corrupted data?
            return false;
          }

          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          for (int u = 0; u < width; u++) {
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}
// Decodes one tile's pixel data. Computes the tile's effective width/height
// (edge tiles may be smaller than the nominal tile size) and forwards to
// DecodePixelData with the tile dimensions as the output stride.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Here, data_width and data_height are the dimensions of the current
  // (sub)level.
  if (tile_size_x * tile_offset_x > data_width ||
      tile_size_y * tile_offset_y > data_height) {
    return false;
  }

  // Clamp the tile extent against the level boundary.
  const int avail_x = data_width - (tile_offset_x * tile_size_x);
  const int avail_y = data_height - (tile_offset_y * tile_size_y);
  (*width) = (avail_x < tile_size_x) ? avail_x : tile_size_x;
  (*height) = (avail_y < tile_size_y) ? avail_y : tile_size_y;

  // Image size = tile size.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}
// Computes, for an interleaved pixel, the byte offset of each channel
// (`channel_offset_list`) and the total per-pixel byte size
// (`pixel_data_size`). `channel_offset` ends up equal to the total size.
// Returns false when a channel has an unknown pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));
  (*pixel_data_size) = 0;
  (*channel_offset) = 0;
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);

    size_t component_size = 0;
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      component_size = sizeof(unsigned short);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      component_size = sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      component_size = sizeof(unsigned int);
    } else {
      // Unknown pixel type.
      return false;
    }

    (*pixel_data_size) += static_cast<int>(component_size);
    (*channel_offset) += component_size;
  }
  return true;
}
// Allocates one buffer per channel, each sized data_width * data_height
// elements. HALF channels are allocated according to the requested pixel
// type (HALF or FLOAT, for on-the-fly conversion); FLOAT/UINT channels are
// allocated at their native size.
// Returns NULL if the channel-pointer table itself cannot be allocated; an
// individual entry is NULL when its allocation failed or the type
// combination is invalid. Caller owns the table and every entry (free()).
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
  if (images == NULL) {
    // Fix: previously the table allocation was unchecked.
    return NULL;
  }

  size_t data_len =
      static_cast<size_t>(data_width) * static_cast<size_t>(data_height);

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    // Fix: guarantee a defined value even when no branch below matches
    // (previously left uninitialized in release builds).
    images[c] = NULL;

    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
    }
  }

  return images;
}
#ifdef _WIN32
// Converts a UTF-8 string to a UTF-16 std::wstring via the Win32 API
// (two-pass MultiByteToWideChar: first measures, then converts).
static inline std::wstring UTF8ToWchar(const std::string &str) {
  const int num_wchars =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  std::wstring result(static_cast<size_t>(num_wchars), 0);
  MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &result[0],
                      (int)result.size());
  return result;
}
#endif
// Parses one EXR (part) header from buf/size into `info`.
// For multipart files, a header list terminator (leading '\0') sets
// *empty_header and returns success. Validates that all attributes required
// by the OpenEXR spec are present; unknown attributes are stored as custom
// attributes (up to TINYEXR_MAX_CUSTOM_ATTRIBUTES, values heap-allocated and
// owned by `info`). On success, info->header_len receives the number of
// bytes consumed.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;
  bool has_name = false;
  bool has_type = false;

  info->name.clear();
  info->type.clear();
  info->data_window.min_x = 0;
  info->data_window.min_y = 0;
  info->data_window.max_x = 0;
  info->data_window.max_y = 0;
  info->line_order = 0;  // @fixme
  info->display_window.min_x = 0;
  info->display_window.min_y = 0;
  info->display_window.max_x = 0;
  info->display_window.max_y = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tiled = 0;
  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // '\0' terminates the attribute list.
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    // For a multipart file, the version field 9th bit is 0.
    if ((version->tiled || version->multipart || version->non_image) &&
        attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      if (data.size() != 9) {
        if (err) {
          (*err) += "(ParseEXRHeader) Invalid attribute data size. Attribute data size must be 9.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
          y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
        if (err) {
          (*err) = "Tile sizes were invalid.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
      info->tiled = 1;
    } else if (attr_name.compare("compression") == 0) {
      // Fix: guard against an empty attribute payload before reading data[0].
      if (data.empty()) {
        if (err) {
          (*err) += "Invalid data size for \"compression\" attribute.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;
    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;
    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->data_window.min_x);
        tinyexr::swap4(&info->data_window.min_y);
        tinyexr::swap4(&info->data_window.max_x);
        tinyexr::swap4(&info->data_window.max_y);
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->display_window.min_x);
        tinyexr::swap4(&info->display_window.min_y);
        tinyexr::swap4(&info->display_window.max_x);
        tinyexr::swap4(&info->display_window.max_y);
        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->pixel_aspect_ratio);
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(&info->screen_window_center[0]);
        tinyexr::swap4(&info->screen_window_center[1]);
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->screen_window_width);
        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(&info->chunk_count);
      }
    } else if (attr_name.compare("name") == 0) {
      if (!data.empty() && data[0]) {
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->name.resize(len);
        info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_name = true;
      }
    } else if (attr_name.compare("type") == 0) {
      if (!data.empty() && data[0]) {
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->type.resize(len);
        info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_type = true;
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        // Fix: `data` may be empty, in which case data.at(0) would throw;
        // store a NULL value for zero-length payloads. Also check malloc.
        if (data.empty()) {
          attrib.value = NULL;
        } else {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          if (attrib.value == NULL) {
            if (err) {
              (*err) += "Failed to allocate memory for attribute value.\n";
            }
            return TINYEXR_ERROR_INVALID_DATA;
          }
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        }
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    if (version->multipart || version->non_image) {
      if (!has_name) {
        ss_err << "\"name\" attribute not found in the header."
               << std::endl;
      }
      if (!has_type) {
        ss_err << "\"type\" attribute not found in the header."
               << std::endl;
      }
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
//
// Copies scalar attributes, channel descriptions and custom attributes from
// `info` into the C struct `exr_header`. The channel, pixel-type and custom
// attribute arrays are malloc'ed here and are expected to be released with
// FreeEXRHeader(). Custom attribute *values* are shallow-copied (pointer only).
//
// Returns false when the `type` attribute contradicts the `tiled` flag
// (an explanation is appended to `err`); an unknown `type` only emits a
// message into `warn` and is not fatal.
static bool ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info, std::string *warn, std::string *err) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window.min_x = info.display_window.min_x;
  exr_header->display_window.min_y = info.display_window.min_y;
  exr_header->display_window.max_x = info.display_window.max_x;
  exr_header->display_window.max_y = info.display_window.max_y;
  exr_header->data_window.min_x = info.data_window.min_x;
  exr_header->data_window.min_y = info.data_window.min_y;
  exr_header->data_window.max_x = info.data_window.max_x;
  exr_header->data_window.max_y = info.data_window.max_y;
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tiled = info.tiled;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;
  EXRSetNameAttr(exr_header, info.name.c_str());

  // Cross-check the `type` attribute against the tiled flag from the version
  // field. A mismatch is an error; an unrecognized type is only a warning.
  bool valid = true;
  if (!info.type.empty()) {
    if (info.type == "scanlineimage") {
      if (exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be off for `scanlineimage` type.\n";
        }
        valid = false;
      }
    } else if (info.type == "tiledimage") {
      if (!exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be on for `tiledimage` type.\n";
        }
        valid = false;
      }
    } else if (info.type == "deeptile") {
      exr_header->non_image = 1;
      if (!exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be on for `deeptile` type.\n";
        }
        valid = false;
      }
    } else if (info.type == "deepscanline") {
      exr_header->non_image = 1;
      if (exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be off for `deepscanline` type.\n";
        }
        valid = false;
      }
    } else {
      if (warn) {
        std::stringstream ss;
        ss << "(ConvertHeader) Unsupported or unknown info.type: " << info.type << "\n";
        (*warn) += ss.str();
      }
    }
  }

  // Channel descriptions (name is truncated to 255 chars + NUL).
  exr_header->num_channels = static_cast<int>(info.channels.size());

  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
    sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
    malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
    malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }

    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
      sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

    // Iterate over the *clamped* count (not info.attributes.size()) so this
    // loop can never write past the allocation above.
    for (size_t i = 0; i < size_t(exr_header->num_custom_attributes); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }

  } else {
    exr_header->custom_attributes = NULL;
  }

  exr_header->header_len = info.header_len;

  // Report the outcome of the `type` validation. (Previously the computed
  // `valid` flag was discarded and `true` was returned unconditionally,
  // silently hiding type/tiled mismatches from the caller.)
  return valid;
}
// Chunk offset tables for every resolution level of an image.
// For scanline images there is a single level; for tiled images there is one
// table per mip/rip level (see InitTileOffsets for the exact layout).
struct OffsetData {
  OffsetData() : num_x_levels(0), num_y_levels(0) {}
  // offsets[level][tile_y][tile_x] = byte offset of the chunk, measured from
  // the start of the EXR data (`head`).
  std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
  int num_x_levels;  // number of resolution levels along X
  int num_y_levels;  // number of resolution levels along Y
};
// Maps a (level-x, level-y) pair to a flat index into OffsetData::offsets,
// according to the tile level mode of the image.
int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
  if (tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
    // Single-level image: everything lives at index 0.
    return 0;
  }
  if (tile_level_mode == TINYEXR_TILE_MIPMAP_LEVELS) {
    // Mipmap chain: one entry per level, indexed by lx alone.
    return lx;
  }
  if (tile_level_mode == TINYEXR_TILE_RIPMAP_LEVELS) {
    // Ripmap: row-major index over the (lx, ly) level grid.
    return lx + ly * num_x_levels;
  }
  assert(false);  // unknown tile level mode
  return 0;
}
// Edge length (in pixels) of mip/rip level `level`, given the level-0 size.
// ROUND_UP keeps any division remainder as one extra pixel; a level never
// shrinks below a single pixel.
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
  assert(level >= 0);
  const int denom = (int)(1u << (unsigned)level);
  int sz = toplevel_size / denom;
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && sz * denom < toplevel_size) {
    sz += 1;
  }
  return (sz < 1) ? 1 : sz;
}
// Decodes every tile of one resolution level into exr_image->tiles.
// `exr_image->level_x/level_y` select the level; chunk offsets come from
// `offset_data`. `head`/`size` delimit the whole EXR buffer, and each tile
// chunk is bounds-checked against `size` before decompression.
// Decoding continues past per-tile failures; the failure classes are OR-ed
// into a bit mask and reported at the end.
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_DATA with details in *err.
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
                            const OffsetData& offset_data,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const unsigned char* head, const size_t size,
                            std::string* err) {
  int num_channels = exr_header->num_channels;

  // Flat index of this level inside offset_data.offsets.
  int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
  int num_y_tiles = (int)offset_data.offsets[level_index].size();
  assert(num_y_tiles);
  int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
  assert(num_x_tiles);
  int num_tiles = num_x_tiles * num_y_tiles;

  int err_code = TINYEXR_SUCCESS;

  // Per-tile failure classes, OR-ed into `error_flag`.
  enum {
    EF_SUCCESS = 0,
    EF_INVALID_DATA = 1,
    EF_INSUFFICIENT_DATA = 2,
    EF_FAILED_TO_DECODE = 4
  };
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Atomic because worker threads OR into it concurrently.
  std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
  unsigned error_flag(EF_SUCCESS);
#endif

  // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
  // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
  if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
      exr_image->level_x == 0 && exr_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to decode tile data.\n";
    }
    err_code = TINYEXR_ERROR_INVALID_DATA;
  }
#endif

  // Zero-initialized so that FreeEXRImage can run safely even on early error.
  exr_image->tiles = static_cast<EXRTile*>(
    calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // C++11 threading path: workers pull tile indices from an atomic counter.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);

  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }

  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]()
      {
        int tile_idx = 0;
        while ((tile_idx = tile_count++) < num_tiles) {

#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
          // Allocate memory for each tile.
          exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
            num_channels, exr_header->channels,
            exr_header->requested_pixel_types, exr_header->tile_size_x,
            exr_header->tile_size_y);

          int x_tile = tile_idx % num_x_tiles;
          int y_tile = tile_idx / num_x_tiles;
          // Chunk layout:
          // 16 byte: tile coordinates
          // 4 byte : data size
          // ~      : data(uncompressed or compressed)
          tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
          if (offset + sizeof(int) * 5 > size) {
            // Insufficient data size.
            error_flag |= EF_INSUFFICIENT_DATA;
            continue;
          }

          size_t data_size =
            size_t(size - (offset + sizeof(int) * 5));
          const unsigned char* data_ptr =
            reinterpret_cast<const unsigned char*>(head + offset);

          int tile_coordinates[4];
          memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
          tinyexr::swap4(&tile_coordinates[0]);
          tinyexr::swap4(&tile_coordinates[1]);
          tinyexr::swap4(&tile_coordinates[2]);
          tinyexr::swap4(&tile_coordinates[3]);

          // The level stored inside the chunk must match the level we decode.
          if (tile_coordinates[2] != exr_image->level_x) {
            // Invalid data.
            error_flag |= EF_INVALID_DATA;
            continue;
          }
          if (tile_coordinates[3] != exr_image->level_y) {
            // Invalid data.
            error_flag |= EF_INVALID_DATA;
            continue;
          }

          int data_len;
          memcpy(&data_len, data_ptr + 16,
                 sizeof(int));  // 16 = sizeof(tile_coordinates)
          tinyexr::swap4(&data_len);

          if (data_len < 2 || size_t(data_len) > data_size) {
            // Insufficient data size.
            error_flag |= EF_INSUFFICIENT_DATA;
            continue;
          }

          // Move to data addr: 20 = 16 + 4;
          data_ptr += 20;

          bool ret = tinyexr::DecodeTiledPixelData(
            exr_image->tiles[tile_idx].images,
            &(exr_image->tiles[tile_idx].width),
            &(exr_image->tiles[tile_idx].height),
            exr_header->requested_pixel_types, data_ptr,
            static_cast<size_t>(data_len), exr_header->compression_type,
            exr_header->line_order,
            exr_image->width, exr_image->height,
            tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
            exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
            static_cast<size_t>(exr_header->num_custom_attributes),
            exr_header->custom_attributes,
            static_cast<size_t>(exr_header->num_channels),
            exr_header->channels, channel_offset_list);
          if (!ret) {
            // Failed to decode tile data.
            error_flag |= EF_FAILED_TO_DECODE;
          }

          exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
          exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
          exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
          exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
  }  // num_thread loop

  for (auto& t : workers) {
    t.join();
  }
#else
  }  // parallel for
#endif

  // Even in the event of an error, the reserved memory may be freed.
  exr_image->num_channels = num_channels;
  exr_image->num_tiles = static_cast<int>(num_tiles);

  if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
  if (err) {
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
    if (error_flag & EF_FAILED_TO_DECODE) {
      (*err) += "Failed to decode tile data.\n";
    }
  }
  return err_code;
}
// Decodes all chunks of one EXR part into `exr_image`, dispatching to the
// tiled path (one DecodeTiledLevel call per resolution level) or the
// scanline path (one compressed block per num_scanline_blocks rows).
// `head`/`size` delimit the whole EXR buffer; all chunk reads are validated
// against `size`. Returns a TINYEXR_* status; messages accumulate in *err.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const OffsetData& offset_data,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  // Rows per compressed block depend on the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;

#if TINYEXR_USE_ZFP
    // ZFP parameters are carried in custom attributes; reject if missing.
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param,
                                 exr_header->custom_attributes,
                                 int(exr_header->num_custom_attributes), err)) {
      return TINYEXR_ERROR_INVALID_HEADER;
    }
#endif
  }

  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_y < exr_header->data_window.min_y) {
    if (err) {
      (*err) += "Invalid data window.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  int data_width =
    exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  int data_height =
    exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tiled) {
      if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
        if (err) {
          std::stringstream ss;
          ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
             << ", "
             << "tile height = " << exr_header->tile_size_y << std::endl;
          (*err) += ss.str();
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  // NOTE: this alias is only meaningful for the scanline path below; the
  // tiled path indexes offset_data per level inside DecodeTiledLevel.
  const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  size_t num_blocks = offsets.size();

  // Byte layout of one pixel across all channels (per-channel byte offsets).
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Atomic: set from multiple worker threads in the scanline path.
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif

  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
      // ONE_LEVEL or MIPMAP: one EXRImage per level, chained via next_level.
      EXRImage* level_image = NULL;
      for (int level = 0; level < offset_data.num_x_levels; ++level) {
        if (!level_image) {
          level_image = exr_image;
        } else {
          level_image->next_level = new EXRImage;
          InitEXRImage(level_image->next_level);
          level_image = level_image->next_level;
        }
        level_image->width =
          LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
        level_image->height =
          LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
        level_image->level_x = level;
        level_image->level_y = level;
        int ret = DecodeTiledLevel(level_image, exr_header,
                                   offset_data,
                                   channel_offset_list,
                                   pixel_data_size,
                                   head, size,
                                   err);
        if (ret != TINYEXR_SUCCESS) return ret;
      }
    } else {
      // RIPMAP: one EXRImage per (level_x, level_y) pair.
      EXRImage* level_image = NULL;
      for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
        for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
          if (!level_image) {
            level_image = exr_image;
          } else {
            level_image->next_level = new EXRImage;
            InitEXRImage(level_image->next_level);
            level_image = level_image->next_level;
          }
          level_image->width =
            LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
          level_image->height =
            LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
          level_image->level_x = level_x;
          level_image->level_y = level_y;
          int ret = DecodeTiledLevel(level_image, exr_header,
                                     offset_data,
                                     channel_offset_list,
                                     pixel_data_size,
                                     head, size,
                                     err);
          if (ret != TINYEXR_SUCCESS) return ret;
        }
    }
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
      size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
      sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
      num_channels, exr_header->channels, exr_header->requested_pixel_types,
      data_width, data_height);

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    // C++11 threading path: workers pull block indices from an atomic counter.
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {

#else

#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
          size_t y_idx = static_cast<size_t>(y);

          if (offsets[y_idx] + sizeof(int) * 2 > size) {
            invalid_data = true;
          } else {
            // Chunk layout:
            // 4 byte: scan line
            // 4 byte: data size
            // ~     : pixel data(uncompressed or compressed)
            size_t data_size =
              size_t(size - (offsets[y_idx] + sizeof(int) * 2));
            const unsigned char *data_ptr =
              reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

            int line_no;
            memcpy(&line_no, data_ptr, sizeof(int));
            int data_len;
            memcpy(&data_len, data_ptr + 4, sizeof(int));
            tinyexr::swap4(&line_no);
            tinyexr::swap4(&data_len);

            if (size_t(data_len) > data_size) {
              invalid_data = true;
            } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
              // Too large value. Assume this is invalid
              // 2**20 = 1048576 = heuristic value.
              invalid_data = true;
            } else if (data_len == 0) {
              // TODO(syoyo): May be ok to raise the threshold for example
              // `data_len < 4`
              invalid_data = true;
            } else {
              // line_no may be negative.
              int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                           (exr_header->data_window.max_y + 1));

              int num_lines = end_line_no - line_no;

              if (num_lines <= 0) {
                invalid_data = true;
              } else {
                // Move to data addr: 8 = 4 + 4;
                data_ptr += 8;

                // Adjust line_no with data_window.bmin.y

                // overflow check
                tinyexr_int64 lno =
                  static_cast<tinyexr_int64>(line_no) -
                  static_cast<tinyexr_int64>(exr_header->data_window.min_y);
                if (lno > std::numeric_limits<int>::max()) {
                  line_no = -1;  // invalid
                } else if (lno < -std::numeric_limits<int>::max()) {
                  line_no = -1;  // invalid
                } else {
                  line_no -= exr_header->data_window.min_y;
                }

                if (line_no < 0) {
                  invalid_data = true;
                } else {
                  if (!tinyexr::DecodePixelData(
                        exr_image->images, exr_header->requested_pixel_types,
                        data_ptr, static_cast<size_t>(data_len),
                        exr_header->compression_type, exr_header->line_order,
                        data_width, data_height, data_width, y, line_no,
                        num_lines, static_cast<size_t>(pixel_data_size),
                        static_cast<size_t>(
                          exr_header->num_custom_attributes),
                        exr_header->custom_attributes,
                        static_cast<size_t>(exr_header->num_channels),
                        exr_header->channels, channel_offset_list)) {
                    invalid_data = true;
                  }
                }
              }
            }
          }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
  }

  if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;
    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}
// Rebuilds a scanline offset table by walking the chunk headers sequentially
// from `marker` (used when the stored offset table is invalid, e.g. for an
// incompletely written file). Each chunk starts with a 4-byte scanline number
// and a 4-byte payload length. Returns false as soon as a chunk header would
// read past `size` or carries an implausible payload length.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // Byte-swap BEFORE validating: previously `data_len` was compared against
    // `size` in file (little-endian) byte order, so on big-endian hosts the
    // sanity check tested a garbage value. swap4 is a no-op on little-endian,
    // so this reordering does not change behavior there.
    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);

    if (data_len >= size) {
      return false;
    }

    (*offsets)[i] = offset;
    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }
  return true;
}
//
// For x > 0, FloorLog2(x) returns floor(log(x)/log(2)):
// the number of times x can be halved before reaching 1.
//
static int FloorLog2(unsigned x) {
  int result = 0;
  for (; x > 1; x >>= 1u) {
    ++result;
  }
  return result;
}
//
// For x > 0, CeilLog2(x) returns ceil(log(x)/log(2)):
// floor(log2(x)), plus one when x is not an exact power of two
// (i.e. any bit below the highest set bit is 1).
//
static int CeilLog2(unsigned x) {
  int floor_log = 0;
  int round_up = 0;
  for (; x > 1; x >>= 1u) {
    if (x & 1u) {
      round_up = 1;
    }
    ++floor_log;
  }
  return floor_log + round_up;
}
// log2 of x, rounded in the direction requested by the tile description.
static int RoundLog2(int x, int tile_rounding_mode) {
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) {
    return FloorLog2(static_cast<unsigned>(x));
  }
  return CeilLog2(static_cast<unsigned>(x));
}
// Number of resolution levels along X implied by the header's tile level
// mode and data window.
static int CalculateNumXLevels(const EXRHeader* exr_header) {
  const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;

    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Mipmaps shrink until the larger dimension reaches 1.
      return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // Ripmap X-level count depends on the width only.
      return RoundLog2(w, exr_header->tile_rounding_mode) + 1;

    default:
      assert(false);
  }
  return 0;
}
// Number of resolution levels along Y implied by the header's tile level
// mode and data window.
static int CalculateNumYLevels(const EXRHeader* exr_header) {
  const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;

    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Mipmaps shrink until the larger dimension reaches 1.
      return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // Ripmap Y-level count depends on the height only.
      return RoundLog2(h, exr_header->tile_rounding_mode) + 1;

    default:
      assert(false);
  }
  return 0;
}
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
std::vector<int>& num_y_tiles,
const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num_x_levels = CalculateNumXLevels(exr_header);
int num_y_levels = CalculateNumYLevels(exr_header);
num_x_tiles.resize(num_x_levels);
num_y_tiles.resize(num_y_levels);
CalculateNumTiles(num_x_tiles,
max_x - min_x + 1,
exr_header->tile_size_x,
exr_header->tile_rounding_mode);
CalculateNumTiles(num_y_tiles,
max_y - min_y + 1,
exr_header->tile_size_y,
exr_header->tile_rounding_mode);
}
// Sets up offset storage for a single-resolution (scanline or chunkCount
// driven) image: one level holding one row of `num_blocks` chunk offsets.
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  offset_data.num_x_levels = 1;
  offset_data.num_y_levels = 1;
  offset_data.offsets.resize(1);
  offset_data.offsets[0].resize(1);
  offset_data.offsets[0][0].resize(num_blocks);
}
// Sizes offset_data.offsets to hold one entry per tile of every resolution
// level, according to the header's tile level mode.
// Return sum of tile blocks.
static int InitTileOffsets(OffsetData& offset_data,
                           const EXRHeader* exr_header,
                           const std::vector<int>& num_x_tiles,
                           const std::vector<int>& num_y_tiles) {
  int num_tile_blocks = 0;
  offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
  offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
    case TINYEXR_TILE_MIPMAP_LEVELS:
      // One offsets[] entry per level; X and Y level counts match here.
      assert(offset_data.num_x_levels == offset_data.num_y_levels);
      offset_data.offsets.resize(offset_data.num_x_levels);

      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        // Level l is a num_y_tiles[l] x num_x_tiles[l] grid of tiles.
        offset_data.offsets[l].resize(num_y_tiles[l]);

        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          offset_data.offsets[l][dy].resize(num_x_tiles[l]);
          num_tile_blocks += num_x_tiles[l];
        }
      }
      break;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // One offsets[] entry per (lx, ly) pair, laid out row-major
      // (index = ly * num_x_levels + lx, matching LevelIndex()).
      offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));

      for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
        for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
          int l = ly * offset_data.num_x_levels + lx;
          offset_data.offsets[l].resize(num_y_tiles[ly]);

          for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
            offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
            num_tile_blocks += num_x_tiles[lx];
          }
        }
      }
      break;

    default:
      assert(false);
  }
  return num_tile_blocks;
}
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
// Returns true when tile (dx, dy) at level (lx, ly) addresses an existing
// entry of offset_data under the header's tile level mode.
static bool isValidTile(const EXRHeader* exr_header,
                        const OffsetData& offset_data,
                        int dx, int dy, int lx, int ly) {
  if (lx < 0 || ly < 0 || dx < 0 || dy < 0) {
    return false;
  }

  const int num_x_levels = offset_data.num_x_levels;
  const int num_y_levels = offset_data.num_y_levels;

  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      // Only (0, 0) is a legal level pair for a single-level image.
      return (lx == 0) &&
             (ly == 0) &&
             (offset_data.offsets.size() > 0) &&
             (offset_data.offsets[0].size() > static_cast<size_t>(dy)) &&
             (offset_data.offsets[0][dy].size() > static_cast<size_t>(dx));

    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Mipmap levels are indexed by lx alone.
      return (lx < num_x_levels) &&
             (ly < num_y_levels) &&
             (offset_data.offsets.size() > static_cast<size_t>(lx)) &&
             (offset_data.offsets[lx].size() > static_cast<size_t>(dy)) &&
             (offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx));

    case TINYEXR_TILE_RIPMAP_LEVELS: {
      // Ripmap levels form a row-major (lx, ly) grid.
      const size_t idx = static_cast<size_t>(lx) +
                         static_cast<size_t>(ly) * static_cast<size_t>(num_x_levels);
      return (lx < num_x_levels) &&
             (ly < num_y_levels) &&
             (offset_data.offsets.size() > idx) &&
             (offset_data.offsets[idx].size() > static_cast<size_t>(dy)) &&
             (offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx));
    }

    default:
      return false;
  }
}
// Rebuilds the tile offset table by walking tile chunks sequentially from
// `marker` (used when the stored table contains invalid entries, e.g. for an
// incompletely written file). Every header read and payload skip is now
// validated against `size` — previously the parameter was ignored
// (`/*size*/`) and a corrupt or truncated chunk could drive the reads out of
// bounds. On the first inconsistency reconstruction simply stops, leaving
// the remaining offsets untouched (same outcome as the existing
// isValidTile() early-return).
static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head, const unsigned char* marker, const size_t size,
                                   bool isMultiPartFile,
                                   bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = tinyexr::tinyexr_uint64(marker - head);

        // Fixed-size portion of this chunk that we are about to read:
        // optional part number, four tile/level ints, then either two int64
        // packed sizes (deep) or one int data size (flat).
        size_t chunk_header_size = sizeof(int) * 4;
        if (isMultiPartFile) {
          chunk_header_size += sizeof(int);
        }
        chunk_header_size += isDeep ? (2 * sizeof(tinyexr::tinyexr_int64)) : sizeof(int);
        if (tileOffset + chunk_header_size > size) {
          return;  // truncated chunk header
        }

        if (isMultiPartFile) {
          //int partNumber;  // not needed for offset reconstruction
          marker += sizeof(int);
        }

        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);

        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);

        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);

        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);

        if (isDeep) {
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          if (packed_offset_table_size < 0 || packed_sample_size < 0) {
            return;  // corrupt chunk: negative payload size
          }

          // next Int64 is unpacked sample size - skip that too
          tinyexr::tinyexr_uint64 skip_size =
            tinyexr::tinyexr_uint64(packed_offset_table_size) +
            tinyexr::tinyexr_uint64(packed_sample_size) + 8;
          if (tinyexr::tinyexr_uint64(marker - head) + skip_size > size) {
            return;  // payload would run past the end of the buffer
          }
          marker += packed_offset_table_size + packed_sample_size + 8;
        } else {
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);

          if (dataSize < 0) {
            return;  // corrupt chunk: negative payload size
          }
          if (tinyexr::tinyexr_uint64(marker - head) + tinyexr::tinyexr_uint64(dataSize) > size) {
            return;  // payload would run past the end of the buffer
          }
          marker += dataSize;
        }

        if (!isValidTile(exr_header, offset_data,
                         tileX, tileY, levelX, levelY))
          return;

        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}
// Reads the offset table of a tiled image from `marker` into offset_data.
// `marker` is also an output parameter: on success it is advanced past the
// table. Every 8-byte read is bounds-checked against `head + size`, and each
// offset value is validated to lie inside the buffer.
static int ReadOffsets(OffsetData& offset_data,
                       const unsigned char* head,
                       const unsigned char*& marker,
                       const size_t size,
                       const char** err) {
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 offset;
        // Reject reads that would run past the end of the buffer.
        if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);
        // Offsets are relative to `head`, so any value >= size is corrupt.
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
        offset_data.offsets[l][dy][dx] = offset;
      }
    }
  }
  return TINYEXR_SUCCESS;
}
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_x - exr_header->data_window.min_x ==
std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
exr_header->data_window.max_y - exr_header->data_window.min_y ==
std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height. header invalid?
{
if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
if (exr_header->tiled) {
if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
OffsetData offset_data;
size_t num_blocks = 0;
// For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
// If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
if (exr_header->tiled) {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
if (exr_header->chunk_count > 0) {
if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
int ret = ReadOffsets(offset_data, head, marker, size, err);
if (ret != TINYEXR_SUCCESS) return ret;
if (IsAnyOffsetsAreInvalid(offset_data)) {
ReconstructTileOffsets(offset_data, exr_header,
head, marker, size,
exr_header->multipart, exr_header->non_image);
}
} else if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
InitSingleResolutionOffsets(offset_data, num_blocks);
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
InitSingleResolutionOffsets(offset_data, num_blocks);
}
if (!exr_header->tiled) {
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
// Collect the unique layer prefixes present in the header's channel names.
//
// A channel named "diffuse.R" belongs to layer "diffuse"; channels without a
// '.' separator, or where the separator is the first or last character,
// contribute no layer name. The output vector is cleared first and preserves
// first-seen order.
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  layer_names.clear();
  for (int idx = 0; idx < exr_header.num_channels; idx++) {
    std::string channel_name(exr_header.channels[idx].name);
    const size_t dot = channel_name.find_last_of('.');
    // No separator, or no usable prefix/suffix around it -> not layered.
    if (dot == std::string::npos || dot == 0 ||
        dot + 1 >= channel_name.size()) {
      continue;
    }
    const std::string prefix = channel_name.substr(0, dot);
    const bool already_listed =
        std::find(layer_names.begin(), layer_names.end(), prefix) !=
        layer_names.end();
    if (!already_listed) {
      layer_names.push_back(prefix);
    }
  }
}
// Pairing of a channel's index in the EXR header with the channel's name as
// seen inside a layer (i.e. with the layer prefix stripped by
// ChannelsInLayer()).
struct LayerChannel {
  // i: index into EXRHeader::channels; n: (possibly prefix-stripped) name.
  explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
  size_t index;      // index of the channel in the EXR header
  std::string name;  // channel name relative to its layer (e.g. "R")
};
// List the channels that belong to `layer_name`, stripping the layer prefix
// from each reported channel name.
//
// With an empty `layer_name`, every channel is returned and only the text
// after the last '.' is kept (so "foo.R" is reported as "R"). With a
// non-empty layer name, channels whose name does not contain
// "<layer_name>." are skipped; when the match is at the start of the name
// the prefix is removed, otherwise the name is kept unchanged.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string &layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  const std::string prefix =
      layer_name.empty() ? std::string() : layer_name + '.';
  for (int idx = 0; idx < exr_header.num_channels; idx++) {
    std::string name(exr_header.channels[idx].name);
    if (layer_name.empty()) {
      // No layer requested: keep only the text after the final separator.
      const size_t dot = name.find_last_of('.');
      if (dot != std::string::npos) {
        name.erase(0, dot + 1);
      }
    } else {
      const size_t hit = name.find(prefix);
      if (hit == std::string::npos) {
        continue;  // channel is not part of the requested layer
      }
      if (hit == 0) {
        name.erase(0, prefix.size());
      }
    }
    channels.push_back(LayerChannel(size_t(idx), name));
  }
}
} // namespace tinyexr
// Enumerate the layer names contained in an EXR file.
//
// On success, *layer_names receives a malloc()'d array of *num_layers
// strdup()'d strings; the caller owns both the array and the strings and
// must free() them. Multipart and deep images are rejected.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
              const char **err) {
  EXRVersion exr_version;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }
    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }
  int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }
  std::vector<std::string> layer_vec;
  tinyexr::GetLayers(exr_header, layer_vec);
  (*num_layers) = int(layer_vec.size());
  (*layer_names) = static_cast<const char **>(
      malloc(sizeof(const char *) * layer_vec.size()));
  // FIX: the original dereferenced the malloc() result without checking it.
  // (malloc(0) may legitimately return NULL, hence the !empty() guard.)
  if ((*layer_names) == NULL && !layer_vec.empty()) {
    (*num_layers) = 0;
    tinyexr::SetErrorMessage("Failed to allocate memory for layer names.",
                             err);
    FreeEXRHeader(&exr_header);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  for (size_t c = 0; c < layer_vec.size(); c++) {
#ifdef _MSC_VER
    (*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
    (*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
  }
  FreeEXRHeader(&exr_header);
  return TINYEXR_SUCCESS;
}
// Load the default (unnamed) layer of an EXR file as interleaved RGBA float
// data. Thin convenience wrapper: forwards to LoadEXRWithLayer() with
// layername = NULL; see that function for the full contract (buffer
// ownership of *out_rgba, error reporting through *err, return codes).
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  return LoadEXRWithLayer(out_rgba, width, height, filename,
                          /* layername */ NULL, err);
}
// Load a single layer of an EXR file (scanline or tiled) as interleaved
// RGBA float data.
//
// out_rgba     : receives a malloc()'d buffer of width*height*4 floats
//                (RGBA); the caller owns it and must free() it.
// width/height : receive the decoded image dimensions. NOTE(review): these
//                pointers are dereferenced without a NULL check — callers
//                must pass valid pointers.
// layername    : layer to extract; NULL (or "") selects the unnamed layer.
// err          : optional; receives an allocated error message on failure.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code. Multipart and deep
// images are rejected. HALF channels are converted to FLOAT during decode.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);
  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }
    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }
  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // Request HALF channels to be decoded as FLOAT so the output buffer is
  // uniformly float data.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  // TODO: Probably limit loading to layers (channels) selected by layer index
  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // Indices of the R/G/B/A channels within the requested layer (-1 = absent).
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  std::vector<std::string> layer_names;
  tinyexr::GetLayers(exr_header, layer_names);
  std::vector<tinyexr::LayerChannel> channels;
  tinyexr::ChannelsInLayer(
      exr_header, layername == NULL ? "" : std::string(layername), channels);
  if (channels.size() < 1) {
    tinyexr::SetErrorMessage("Layer Not Found", err);
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_LAYER_NOT_FOUND;
  }
  // Only the first four channels can map onto the RGBA output.
  size_t ch_count = channels.size() < 4 ? channels.size() : 4;
  for (size_t c = 0; c < ch_count; c++) {
    const tinyexr::LayerChannel &ch = channels[c];
    if (ch.name == "R") {
      idxR = int(ch.index);
    } else if (ch.name == "G") {
      idxG = int(ch.index);
    } else if (ch.name == "B") {
      idxB = int(ch.index);
    } else if (ch.name == "A") {
      idxA = int(ch.index);
    }
  }
  if (channels.size() == 1) {
    int chIdx = int(channels.front().index);
    // Grayscale channel only: replicate it into R, G, B and A.
    // NOTE(review): malloc() result is used unchecked here (as in the
    // multi-channel path below).
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Tiled image: copy each tile into its place in the full-resolution
      // output; edge tiles may extend past the image, hence the ii/jj checks.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii = exr_image.tiles[it].offset_x *
                               static_cast<int>(exr_header.tile_size_x) +
                           i;
            const int jj = exr_image.tiles[it].offset_y *
                               static_cast<int>(exr_header.tile_size_y) +
                           j;
            const int idx = ii + jj * static_cast<int>(exr_image.width);
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
          }
        }
      }
    } else {
      // Scanline image: channel data is already a flat width*height plane.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val =
            reinterpret_cast<float **>(exr_image.images)[chIdx][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // Assume RGB(A)
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Tiled image: copy tile-by-tile, skipping texels past the image edge.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              // No alpha channel in the file: synthesize opaque alpha.
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      // Scanline image: interleave the planar R/G/B(/A) channels.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          // No alpha channel in the file: synthesize opaque alpha.
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }
  (*width) = exr_image.width;
  (*height) = exr_image.height;
  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
// Parse the EXR header that follows the version block in `memory` and fill
// `exr_header` (which the caller must later release with FreeEXRHeader()).
//
// version : parsed version info (from ParseEXRVersionFromMemory()).
// err     : optional; receives an allocated error message on failure.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  // FIX: also reject a NULL `version` — it is dereferenced below and by
  // ParseEXRHeader(); the original only checked memory/exr_header.
  if (memory == NULL || exr_header == NULL || version == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Header bytes start right after the fixed-size magic + version block.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;
  tinyexr::HeaderInfo info;
  info.clear();
  {
    std::string err_str;
    int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
    if (ret != TINYEXR_SUCCESS) {
      if (err && !err_str.empty()) {
        tinyexr::SetErrorMessage(err_str, err);
      }
      // FIX: bail out here. The original fell through and ran ConvertHeader()
      // on a partially-parsed `info`, possibly overwriting the parse error.
      return ret;
    }
  }
  int ret = TINYEXR_SUCCESS;
  {
    std::string warn;
    std::string err_str;
    if (!ConvertHeader(exr_header, info, &warn, &err_str)) {
      if (err && !err_str.empty()) {
        tinyexr::SetErrorMessage(err_str, err);
      }
      ret = TINYEXR_ERROR_INVALID_HEADER;
    }
  }
  // Propagate version flags so downstream code can reject unsupported modes.
  exr_header->multipart = version->multipart ? 1 : 0;
  exr_header->non_image = version->non_image ? 1 : 0;
  return ret;
}
// Decode an EXR image already resident in memory into interleaved RGBA
// float data.
//
// out_rgba     : receives a malloc()'d buffer of width*height*4 floats
//                (RGBA); the caller owns it and must free() it.
// width/height : receive the decoded dimensions.
// memory/size  : the complete EXR file contents.
// err          : optional; receives an allocated error message on failure.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }
  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // FIX: release the (possibly partially-populated) header; the original
    // returned without freeing it. Matches LoadEXRWithLayer()'s handling.
    FreeEXRHeader(&exr_header);
    return ret;
  }
  // Request HALF channels to be decoded as FLOAT so the output is uniform.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // FIX: release the header; the original leaked it on this path.
    FreeEXRHeader(&exr_header);
    return ret;
  }
  // Indices of the R/G/B/A channels (-1 = absent).
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }
  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only: replicate it into R, G, B and A.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Edge tiles may extend past the image, hence the ii/jj checks.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // FIX: resolves the "@todo { free exr_image }" leak; same cleanup as
      // LoadEXRWithLayer().
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              // No alpha channel: synthesize opaque alpha.
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          // No alpha channel: synthesize opaque alpha.
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }
  (*width) = exr_image.width;
  (*height) = exr_image.height;
  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
// Read an EXR file from disk into memory and decode it via
// LoadEXRImageFromMemory(). The caller must have parsed `exr_header`
// beforehand and must release `exr_image` with FreeEXRImage() on success.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  // Compute file size via seek-to-end.
  fseek(fp, 0, SEEK_END);
  long tell_result = ftell(fp);
  // FIX: ftell() returns -1 on error; the original cast it to size_t
  // unchecked, producing a huge bogus size.
  if (tell_result < 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("Cannot get file size " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(tell_result);
  fseek(fp, 0, SEEK_SET);
  if (filesize < 16) {
    fclose(fp);  // FIX: the original leaked the FILE* on this early return.
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    // FIX: the original only assert()ed the read size, which is compiled out
    // under NDEBUG and would then decode a partially-filled buffer.
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decode the pixel data of an EXR file held in `memory` using the
// previously-parsed `exr_header`. On success the caller owns `exr_image`
// and must release it with FreeEXRImage().
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // FIX: ensure the chunk marker computed below stays inside `memory`.
  // The original added header_len + 8 to `memory` without checking `size`,
  // so a truncated buffer produced an out-of-bounds marker. Written as a
  // subtraction to avoid integer overflow.
  if (size < 8 || (size - 8) < static_cast<size_t>(exr_header->header_len)) {
    tinyexr::SetErrorMessage("Insufficient data size for the EXR header.",
                             err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
//
// Serialize one block (a scanline chunk, or one whole tile) of planar
// channel data into `out_data`, converting each channel to its requested
// pixel type, byte-swapping to the EXR little-endian layout, and then
// applying `compression_type`. The uncompressed intermediate layout is
// line-interleaved: for each scanline, all channels' rows are laid out
// consecutively (channel_offset_list gives each channel's per-line byte
// offset, pixel_data_size the summed per-pixel byte size of all channels).
//
// images    : per-channel planar source data, indexed [channel][pixel];
//             element type depends on channels[c].pixel_type.
// line_no   : first source scanline of this block (0 for tiles).
// num_lines : number of scanlines in this block (tile.height for tiles).
// x_stride  : source row stride in pixels (tile_size_x for tiles).
// Returns true on success, false for an unsupported compression type.
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
                            const unsigned char* const* images,
                            int compression_type,
                            int /*line_order*/,
                            int width,       // for tiled : tile.width
                            int /*height*/,  // for tiled : header.tile_size_y
                            int x_stride,    // for tiled : header.tile_size_x
                            int line_no,     // for tiled : 0
                            int num_lines,   // for tiled : tile.height
                            size_t pixel_data_size,
                            const std::vector<ChannelInfo>& channels,
                            const std::vector<size_t>& channel_offset_list,
                            const void* compression_param = 0) // zfp compression param
{
  // Staging buffer for the uncompressed, byte-swapped block.
  size_t buf_size = static_cast<size_t>(width) *
                    static_cast<size_t>(num_lines) *
                    static_cast<size_t>(pixel_data_size);
  //int last2bit = (buf_size & 3);
  // buf_size must be multiple of four
  //if(last2bit) buf_size += 4 - last2bit;
  std::vector<unsigned char> buf(buf_size);
  size_t start_y = static_cast<size_t>(line_no);
  // Convert each channel from its stored type to its requested type,
  // swapping bytes as the values are written into `buf`.
  for (size_t c = 0; c < channels.size(); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF source -> FLOAT output.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
              static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP16 h16;
            h16.u = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP32 f32 = half_to_float(h16);
            tinyexr::swap4(&f32.f);
            // line_ptr[x] = f32.f;
            // cpy4 avoids a potentially misaligned float store.
            tinyexr::cpy4(line_ptr + x, &(f32.f));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // HALF source -> HALF output (byte swap only).
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                      static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            unsigned short val = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap2(&val);
            // line_ptr[x] = val;
            tinyexr::cpy2(line_ptr + x, &val);
          }
        }
      } else {
        // HALF may only be written out as HALF or FLOAT.
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // FLOAT source -> HALF output.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                      static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP32 f32;
            f32.f = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP16 h16;
            h16 = float_to_half_full(f32);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
            // line_ptr[x] = h16.u;
            tinyexr::cpy2(line_ptr + x, &(h16.u));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // FLOAT source -> FLOAT output (byte swap only).
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
              static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            float val = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap4(&val);
            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      } else {
        // FLOAT may only be written out as HALF or FLOAT.
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // UINT source -> UINT output (byte swap only).
      for (int y = 0; y < num_lines; y++) {
        // Assume increasing Y
        unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
            static_cast<size_t>(pixel_data_size * y * width) +
            channel_offset_list[c] * static_cast<size_t>(width)));
        for (int x = 0; x < width; x++) {
          unsigned int val = reinterpret_cast<const unsigned int * const *>(
              images)[c][(y + start_y) * x_stride + x];
          tinyexr::swap4(&val);
          // line_ptr[x] = val;
          tinyexr::cpy4(line_ptr + x, &val);
        }
      }
    }
  }
  // Compress (or pass through) the staged block and append it to out_data.
  if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(uncompressed)
    out_data.insert(out_data.end(), buf.begin(), buf.end());
  } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
             (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
    std::vector<unsigned char> block(mz_compressBound(
        static_cast<unsigned long>(buf.size())));
#else
    std::vector<unsigned char> block(
        compressBound(static_cast<uLong>(buf.size())));
#endif
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressZip(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // (buf.size() * 3) / 2 would be enough.
    std::vector<unsigned char> block((buf.size() * 3) / 2);
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressRle(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
    unsigned int bufLen =
        8192 + static_cast<unsigned int>(
                   2 * static_cast<unsigned int>(
                           buf.size()));  // @fixme { compute good bound. }
    std::vector<unsigned char> block(bufLen);
    unsigned int outSize = static_cast<unsigned int>(block.size());
    CompressPiz(&block.at(0), &outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size(), channels, width, num_lines);
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    assert(0);
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    // ZFP parameters must be supplied by the caller via compression_param.
    const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
    std::vector<unsigned char> block;
    unsigned int outSize;
    tinyexr::CompressZfp(
        &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
        width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    (void)compression_param;
    assert(0);
#endif
  } else {
    // Unknown/unsupported compression scheme.
    assert(0);
    return false;
  }
  return true;
}
// Encode every tile of one mip/rip level into `data_list`, starting at
// `data_list[start_index]`. Each tile entry is a 20-byte header
// (tileX, tileY, levelX, levelY, data length — all big/little-swapped via
// swap4) followed by the compressed pixel data from EncodePixelData().
//
// Tiles are processed in parallel: with C++11 threads when
// TINYEXR_HAS_CXX11 && TINYEXR_USE_THREAD, otherwise with an optional
// OpenMP `parallel for`. NOTE: the preprocessor branches below deliberately
// split the loop header/footer — keep them exactly paired when editing.
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_DATA if any tile fails
// to encode (message appended to *err when provided).
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
                            const std::vector<tinyexr::ChannelInfo>& channels,
                            std::vector<std::vector<unsigned char> >& data_list,
                            size_t start_index, // for data_list
                            int num_x_tiles, int num_y_tiles,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const void* compression_param, // must be set if zfp compression is enabled
                            std::string* err) {
  int num_tiles = num_x_tiles * num_y_tiles;
  assert(num_tiles == level_image->num_tiles);
  // A base-level (0,0) image smaller than a single tile is malformed.
  if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
      level_image->level_x == 0 && level_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Shared failure flag; atomic when worker threads are used.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Thread pool: workers pull tile indices from a shared atomic counter.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);
  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }
  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int i = 0;
      while ((i = tile_count++) < num_tiles) {
#else
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_tiles; i++) {
#endif
    // Body shared by both the threaded and the serial/OpenMP loop above.
    size_t tile_idx = static_cast<size_t>(i);
    size_t data_idx = tile_idx + start_index;
    int x_tile = i % num_x_tiles;
    int y_tile = i / num_x_tiles;
    EXRTile& tile = level_image->tiles[tile_idx];
    const unsigned char* const* images =
        static_cast<const unsigned char* const*>(tile.images);
    // Reserve room for the 5-int tile header; EncodePixelData appends after.
    data_list[data_idx].resize(5*sizeof(int));
    size_t data_header_size = data_list[data_idx].size();
    bool ret = EncodePixelData(data_list[data_idx],
                               images,
                               exr_header->compression_type,
                               0, // increasing y
                               tile.width,
                               exr_header->tile_size_y,
                               exr_header->tile_size_x,
                               0,
                               tile.height,
                               pixel_data_size,
                               channels,
                               channel_offset_list,
                               compression_param);
    if (!ret) {
      invalid_data = true;
      continue;
    }
    assert(data_list[data_idx].size() > data_header_size);
    int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
    //tileX, tileY, levelX, levelY // pixel_data_size(int)
    memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
    memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
    memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
    memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
    memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
    // Byte-swap the header fields to the EXR on-disk byte order.
    swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }
  for (auto &t : workers) {
    t.join();
  }
#else
  } // omp parallel
#endif
  if (invalid_data) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  return TINYEXR_SUCCESS;
}
// Number of scanlines stored per chunk for a given compression scheme:
// ZIP and ZFP pack 16 rows per chunk, PIZ packs 32, and everything else
// (NONE, RLE, ZIPS, ...) stores a single scanline per chunk.
static int NumScanlines(int compression_type) {
  switch (compression_type) {
    case TINYEXR_COMPRESSIONTYPE_ZIP:
    case TINYEXR_COMPRESSIONTYPE_ZFP:
      return 16;
    case TINYEXR_COMPRESSIONTYPE_PIZ:
      return 32;
    default:
      return 1;
  }
}
// Compresses every block (tile or scanline chunk) of a single EXR part into
// `data_list` and records each block's absolute file offset in `offset_data`.
// Offsets are stored already byte-swapped (little-endian) so they can be
// memcpy'd straight into the output file. On success, `total_size` receives
// the file offset just past this part's last chunk. For multipart files each
// chunk is preceded by a 4-byte part number, accounted for via `doffset`.
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
    const std::vector<ChannelInfo>& channels,
    int num_blocks,
    tinyexr_uint64 chunk_offset, // starting offset of current chunk
    bool is_multipart,
    OffsetData& offset_data, // output block offsets, must be initialized
    std::vector<std::vector<unsigned char> >& data_list, // output
    tinyexr_uint64& total_size, // output: ending offset of current chunk
    std::string* err) {
  int num_scanlines = NumScanlines(exr_header->compression_type);

  data_list.resize(num_blocks);

  // Byte offset of each channel inside one interleaved pixel, plus the total
  // size in bytes of a single pixel across all channels.
  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));

  int pixel_data_size = 0;
  {
    size_t channel_offset = 0;
    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
      channel_offset_list[c] = channel_offset;
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        pixel_data_size += sizeof(unsigned short);
        channel_offset += sizeof(unsigned short);
      } else if (channels[c].requested_pixel_type ==
                 TINYEXR_PIXELTYPE_FLOAT) {
        pixel_data_size += sizeof(float);
        channel_offset += sizeof(float);
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
        pixel_data_size += sizeof(unsigned int);
        channel_offset += sizeof(unsigned int);
      } else {
        assert(0);
      }
    }
  }

  const void* compression_param = 0;
#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;

  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    std::string e;
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes, &e);

    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
    compression_param = &zfp_compression_param;
  }
#endif

  tinyexr_uint64 offset = chunk_offset;
  // Multipart chunks carry a 4-byte part-number prefix before the payload.
  tinyexr_uint64 doffset = is_multipart ? 4u : 0u;

  if (exr_image->tiles) {
    const EXRImage* level_image = exr_image;
    size_t block_idx = 0;
    tinyexr::tinyexr_uint64 block_data_size = 0;
    // RIPMAP stores num_x_levels * num_y_levels level images; MIPMAP and
    // ONE_LEVEL store num_x_levels of them.
    int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
        offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
    for (int level_index = 0; level_index < num_levels; ++level_index) {
      if (!level_image) {
        if (err) {
          (*err) += "Invalid number of tiled levels for EncodeChunk\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      // The linked level images must appear in exactly the order the offset
      // table expects them.
      int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
          exr_header->tile_level_mode, offset_data.num_x_levels);
      if (level_index_from_image != level_index) {
        if (err) {
          (*err) += "Incorrect level ordering in tiled image\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      int num_y_tiles = (int)offset_data.offsets[level_index].size();
      assert(num_y_tiles);
      int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
      assert(num_x_tiles);

      std::string e;
      int ret = EncodeTiledLevel(level_image,
          exr_header,
          channels,
          data_list,
          block_idx,
          num_x_tiles,
          num_y_tiles,
          channel_offset_list,
          pixel_data_size,
          compression_param,
          &e);
      if (ret != TINYEXR_SUCCESS) {
        if (!e.empty() && err) {
          (*err) += e;
        }
        return ret;
      }

      // Record the (byte-swapped) absolute offset of every tile in this level.
      for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
        for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
          offset_data.offsets[level_index][j][i] = offset;
          swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
          offset += data_list[block_idx].size() + doffset;
          block_data_size += data_list[block_idx].size();
          ++block_idx;
        }
      level_image = level_image->next_level;
    }
    assert(static_cast<int>(block_idx) == num_blocks);
    total_size = offset;
  } else { // scanlines
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];

    // The same loop body compiles three ways: C++11 worker threads pulling
    // block indices from an atomic counter, an OpenMP parallel-for, or a
    // plain serial loop.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    std::atomic<bool> invalid_data(false);
    std::vector<std::thread> workers;
    std::atomic<int> block_count(0);

    int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int i = 0;
        while ((i = block_count++) < num_blocks) {
#else
    bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < num_blocks; i++) {
#endif
      // The last block may cover fewer scanlines than num_scanlines.
      int start_y = num_scanlines * i;
      int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
      int num_lines = end_Y - start_y;

      const unsigned char* const* images =
          static_cast<const unsigned char* const*>(exr_image->images);

      // Reserve room for the chunk header: y coordinate + payload byte count.
      data_list[i].resize(2*sizeof(int));
      size_t data_header_size = data_list[i].size();
      bool ret = EncodePixelData(data_list[i],
          images,
          exr_header->compression_type,
          0, // increasing y
          exr_image->width,
          exr_image->height,
          exr_image->width,
          start_y,
          num_lines,
          pixel_data_size,
          channels,
          channel_offset_list,
          compression_param);
      if (!ret) {
        invalid_data = true;
        continue; // "break" cannot be used with OpenMP
      }
      assert(data_list[i].size() > data_header_size);
      int data_len = static_cast<int>(data_list[i].size() - data_header_size);
      // Fill in the chunk header (stored little-endian).
      memcpy(&data_list[i][0], &start_y, sizeof(int));
      memcpy(&data_list[i][4], &data_len, sizeof(int));

      swap4(reinterpret_cast<int*>(&data_list[i][0]));
      swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    } // omp parallel
#endif

    if (invalid_data) {
      if (err) {
        (*err) += "Failed to encode scanline data.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    // Record the (byte-swapped) absolute offset of every scanline chunk.
    for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
      offsets[i] = offset;
      tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
      offset += data_list[i].size() + doffset;
    }

    total_size = static_cast<size_t>(offset);
  }
  return TINYEXR_SUCCESS;
}
// Serializes one or more EXR parts (scanline or tiled; deep formats are not
// supported) into a single malloc()'d buffer.
//
// Layout written: magic number, version flags, per-part headers, per-part
// chunk offset tables, then the chunk payloads (each prefixed with a 4-byte
// part number when num_parts > 1).
//
// Returns the total buffer size in bytes, or 0 on failure (with *err set).
// On success the caller owns *memory_out and must free() it.
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out, const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                    err);
    return 0;
  }
  {
    // Reject compression types this build cannot produce.
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                        err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build",
                        err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build",
                        err);
        return 0;
      }
#else
      // BUGFIX: this loop previously read `exr_header->num_channels`, an
      // identifier that does not exist in this scope (compile error whenever
      // TINYEXR_USE_ZFP is enabled). Use the current part's header.
      for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
        if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
          SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                          err);
          return 0;
        }
      }
#endif
    }
  }

  std::vector<unsigned char> memory;

  // Magic number.
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    memory.insert(memory.end(), header, header + 4);
  }

  // Version field.
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled bit is only set for a single-part tiled image
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Number of chunks per part, plus initialized offset tables.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] =
            InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }

  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;
    for (unsigned int i = 0; i < num_parts; ++i) {
      // channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->pixel_types[c];
          info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                        static_cast<int>(data.size()));
      }

      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
            &memory, "compression", "compression",
            reinterpret_cast<const unsigned char*>(&comp), 1);
      }

      {
        // dataWindow is part-specific; displayWindow must be identical across
        // parts, so the first part's dimensions are always used for it.
        int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
            &memory, "dataWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);

        int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);
        WriteAttributeToMemory(
            &memory, "displayWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
      }

      {
        unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }

      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
            &memory, "pixelAspectRatio", "float",
            reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
      }

      {
        // BUGFIX: the two swap4 calls below were mojibake ("¢er[0]", an HTML
        // entity left by a bad encoding pass); restored to `&center[...]`.
        float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
            &memory, "screenWindowCenter", "v2f",
            reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
      }

      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }

      if (exr_images[i].tiles) {
        // tiledesc = xSize(4 bytes) + ySize(4 bytes) + mode byte
        // (level mode in the low bits, rounding-mode flag at bit 4).
        unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
            &memory, "tiles", "tiledesc",
            reinterpret_cast<const unsigned char*>(data), 9);
      }

      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            // The set detects duplicate part names across the loop.
            partnames.emplace(exr_headers[i]->name);
            if (partnames.size() != i + 1) {
              SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
              return 0;
            }
            WriteAttributeToMemory(
                &memory, "name", "string",
                reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
                static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
              &memory, "type", "string",
              reinterpret_cast<const unsigned char*>(type),
              static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
              &memory, "chunkCount", "int",
              reinterpret_cast<const unsigned char*>(&chunk_count[i]),
              4);
        }
      }

      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
              &memory, exr_headers[i]->custom_attributes[j].name,
              exr_headers[i]->custom_attributes[j].type,
              reinterpret_cast<const unsigned char*>(
                  exr_headers[i]->custom_attributes[j].value),
              exr_headers[i]->custom_attributes[j].size);
        }
      }

      { // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }

  // The first chunk starts right after the headers and the offset tables.
  tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);

  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i], // output: block offsets, must be initialized
                          data_lists[i], // output
                          total_size, // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return 0;
    }
    chunk_offset = total_size;
  }

  // Allocating required memory
  if (total_size == 0) { // something went wrong
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  (*memory_out) = static_cast<unsigned char*>(malloc(size_t(total_size)));
  // BUGFIX: the allocation result was previously dereferenced unchecked.
  if ((*memory_out) == NULL) {
    tinyexr::SetErrorMessage("Cannot allocate memory for EXR", err);
    return 0;
  }

  // Writing header
  memcpy((*memory_out), &memory[0], memory.size());
  unsigned char* memory_ptr = *memory_out + memory.size();
  size_t sum = memory.size();

  // Writing offset data for chunks (already byte-swapped by EncodeChunk).
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (exr_images[i].tiles) {
      int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
          offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
      memory_ptr += num_bytes;
    }
  }

  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        // Multipart chunks are prefixed with the little-endian part number.
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
  assert(sum == total_size);
  return size_t(total_size); // OK
}
} // tinyexr
// Serializes a single-part EXR image to a malloc()'d buffer.
// Thin convenience wrapper over the N-part writer with a part count of one.
// Returns the buffer size, or 0 on failure (caller must free *memory_out).
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
                            const EXRHeader* exr_header,
                            unsigned char** memory_out, const char** err) {
  const unsigned int kSinglePart = 1;
  return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header,
                                            kSinglePart, memory_out, err);
}
// Serializes a single-part EXR image and writes it to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (with *err set).
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  if (exr_image == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    // BUGFIX: the stream was leaked when serialization failed.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }

  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);

  fclose(fp);

  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  return TINYEXR_SUCCESS;
}
// Serializes a multi-part EXR (two or more parts) to a malloc()'d buffer.
// Delegates the actual writing to the generic N-part writer.
// Returns the buffer size, or 0 on failure (caller must free *memory_out).
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
                                     const EXRHeader** exr_headers,
                                     unsigned int num_parts,
                                     unsigned char** memory_out, const char** err) {
  const bool args_ok = (exr_images != NULL) && (exr_headers != NULL) &&
                       (num_parts >= 2) && (memory_out != NULL);
  if (!args_ok) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                             err);
    return 0;
  }
  return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err);
}
// Serializes a multi-part EXR (two or more parts) and writes it to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (with *err set).
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
                                const EXRHeader** exr_headers,
                                unsigned int num_parts,
                                const char* filename,
                                const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err);
  if (mem_size == 0) {
    // BUGFIX: the stream was leaked when serialization failed.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }

  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);

  fclose(fp);

  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  return TINYEXR_SUCCESS;
}
// Loads a deep scanline EXR (version 2.0 with the deep bit, 0x800, set) from
// `filename` into `deep_image`. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_*
// code (with *err set). The caller releases the allocated arrays.
//
// NOTE(review): block payloads are always inflated with DecompressZip even
// when the header declares NONE/RLE/PIZ compression — confirm this is the
// intended level of deep-data support.
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  if (deep_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];
  const char *marker = &buf[0];

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }

    marker += 4;
  }

  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1; // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;

  // Read attributes until the terminating '\0'.
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      marker++;
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      num_channels = static_cast<int>(channels.size());

      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

    } else if (attr_name.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      tinyexr::swap4(&dx);
      tinyexr::swap4(&dy);
      tinyexr::swap4(&dw);
      tinyexr::swap4(&dh);

    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed for validation only; the values are currently unused.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(&x);
      tinyexr::swap4(&y);
      tinyexr::swap4(&w);
      tinyexr::swap4(&h);
    }
  }

  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);

  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;

  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }

  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64); // = 8
    offsets[y] = offset;
  }

#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }

  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // Per-row sample arrays are allocated in the block loop below, once the
    // per-line sample count is known. (Removed a dead empty loop here.)
  }

  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // Deep scanline block layout:
    //   int: y coordinate
    //   int64: packed size of pixel offset table
    //   int64: packed size of sample data
    //   int64: unpacked size of sample data
    //   compressed pixel offset table
    //   compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));

    tinyexr::swap4(&line_no);
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));

    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));

    // decode pixel offset table.
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        // BUGFIX: previously `return false;`, which equals TINYEXR_SUCCESS
        // (0) and silently reported success on corrupt input.
        // NOTE(review): arrays allocated above still leak on this path.
        return TINYEXR_ERROR_INVALID_DATA;
      }

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));

    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          // BUGFIX: previously `return false;` (== TINYEXR_SUCCESS).
          return TINYEXR_ERROR_INVALID_DATA;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }

    // decode sample
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      // Byte offset of each channel within a single sample record.
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) { // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);

    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());
    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));

        if (channels[c].pixel_type == 0) { // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) { // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else { // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  } // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;

  return TINYEXR_SUCCESS;
}
// Resets an EXRImage to an empty, well-defined state. NULL input is tolerated.
void InitEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return;
  }

  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  // No pixel storage attached yet (neither scanline images nor tiles).
  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->num_tiles = 0;

  // Single-level image by default (no mip/rip chain).
  exr_image->next_level = NULL;
  exr_image->level_x = 0;
  exr_image->level_y = 0;
}
// Releases an error string previously handed out by a tinyexr API call.
// Passing NULL is a no-op.
void FreeEXRErrorMessage(const char *msg) {
  if (msg == NULL) {
    return;
  }
  free(const_cast<char *>(msg));
}
// Zero-initializes an EXRHeader. NULL input is tolerated.
void InitEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return;
  }
  memset(exr_header, 0, sizeof *exr_header);
}
// Releases all heap storage owned by an EXRHeader and clears its name.
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_ARGUMENT for NULL input.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // free(NULL) is a no-op, so the per-pointer guards are unnecessary.
  free(exr_header->channels);
  free(exr_header->pixel_types);
  free(exr_header->requested_pixel_types);

  // Guard the array itself before indexing it in the loop (the original
  // dereferenced custom_attributes[i] without checking the array pointer).
  if (exr_header->custom_attributes) {
    for (int i = 0; i < exr_header->num_custom_attributes; i++) {
      free(exr_header->custom_attributes[i].value);
    }
    free(exr_header->custom_attributes);
  }

  EXRSetNameAttr(exr_header, NULL);

  return TINYEXR_SUCCESS;
}
// Copies `name` into the header's fixed 256-byte name field, truncating to
// 255 bytes so the result is always NUL-terminated. Passing NULL for `name`
// simply clears the field; a NULL header is a no-op.
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  if (exr_header == NULL) {
    return;
  }
  // Clear first: guarantees NUL termination regardless of the copy below.
  memset(exr_header->name, 0, 256);
  if (name == NULL) {
    return;
  }
  size_t len = std::min(strlen(name), (size_t)255);
  if (len > 0) {
    memcpy(exr_header->name, name, len);
  }
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
// Releases all pixel storage owned by an EXRImage, recursing through the
// mip/rip level chain first. Returns TINYEXR_SUCCESS, or
// TINYEXR_ERROR_INVALID_ARGUMENT for NULL input.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Coarser levels are freed (and deleted) before this level's own storage.
  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
  }

  // Scanline storage: per-channel planes, then the plane array.
  // free(NULL) is a no-op, so missing planes need no individual guard.
  if (exr_image->images) {
    for (int c = 0; c < exr_image->num_channels; c++) {
      free(exr_image->images[c]);
    }
    free(exr_image->images);
  }

  // Tile storage: per-tile per-channel planes, then each tile's plane array,
  // then the tile array itself.
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      if (exr_image->tiles[tid].images) {
        for (int c = 0; c < exr_image->num_channels; c++) {
          free(exr_image->tiles[tid].images[c]);
        }
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}
// Reads `filename` into memory and parses its EXR header into `exr_header`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (with *err set).
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  // BUGFIX: an empty (or unseekable) file previously reached `buf.at(0)` on a
  // zero-length vector, raising an uncaught std::out_of_range. Mirror the
  // zero-size check used by LoadDeepEXR.
  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parses all part headers of a multipart EXR held in memory.
// On success, *exr_headers points to a malloc'd array of *num_headers
// malloc'd EXRHeader structs; the caller owns both levels.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  std::vector<tinyexr::HeaderInfo> infos;

  // Part headers are laid out back to back, terminated by an empty
  // (single '\0') header.
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();

    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);

    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }

    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }

    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // Move to the next header.
    // BUG FIX: the remaining-size bound must shrink together with the
    // marker. The original advanced `marker` but kept passing the initial
    // `marker_size` to ParseEXRHeader(), allowing reads past the end of
    // `memory` for the second and later parts.
    if (info.header_len > marker_size) {
      tinyexr::SetErrorMessage("Invalid EXR header length.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += info.header_len;
    marker_size -= info.header_len;
  }

  if (infos.empty()) {
    // A multipart file must contain at least one part header.
    tinyexr::SetErrorMessage("No header found in multipart EXR data.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Allocate memory for EXRHeader and create an array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  if ((*exr_headers) == NULL) {  // BUG FIX: malloc was unchecked
    tinyexr::SetErrorMessage("Memory allocation failed.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  int retcode = TINYEXR_SUCCESS;

  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
    if (exr_header == NULL) {
      // Roll back headers allocated so far so the caller gets nothing.
      for (size_t j = 0; j < i; j++) {
        free((*exr_headers)[j]);
      }
      free(*exr_headers);
      (*exr_headers) = NULL;
      tinyexr::SetErrorMessage("Memory allocation failed.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    memset(exr_header, 0, sizeof(EXRHeader));

    std::string warn;
    std::string _err;
    if (!ConvertHeader(exr_header, infos[i], &warn, &_err)) {
      if (!_err.empty()) {
        tinyexr::SetErrorMessage(_err, err);
      }
      // Continue converting the remaining headers, but report failure.
      retcode = TINYEXR_ERROR_INVALID_HEADER;
    }
    exr_header->multipart = exr_version->multipart ? 1 : 0;

    (*exr_headers)[i] = exr_header;
  }
  (*num_headers) = static_cast<int>(infos.size());

  return retcode;
}
// File-based wrapper for ParseEXRMultipartHeaderFromMemory(): reads the
// whole file into memory and delegates. Ownership of the returned header
// array is described there.
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute the file size.
  // BUG FIX: ftell() may fail (returns -1) and a zero-byte file would make
  // &buf[0] below undefined behavior; reject both before allocating.
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (fsize <= 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("Empty or invalid file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(fsize);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parses the 8-byte EXR prologue (4-byte magic number + 4-byte
// version/flags field) at the start of `memory` into `version`.
// Only OpenEXR version 2 streams are accepted.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Magic number check: 0x76 0x2f 0x31 0x01 ("v/1\x01").
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse the version/flags field.
  // (A redundant `version == NULL` check was removed here; NULL is already
  // rejected at the top of the function, so that branch was dead code.)
  {
    // version byte must be 2
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }
    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit: single-part tiled format
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit: long attribute/channel names
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit: non-image (deep) data
      version->non_image = true;
    }
    if (marker[1] & 0x10) {  // 12th bit: multipart file
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}
// Reads the first 8 bytes of `filename` and parses the EXR magic number
// and version flags into `version`.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (err != 0) {
    // TODO(syoyo): return wfopen_s error code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute the file size.
  // Use a signed long so that an ftell() failure (-1) is not silently
  // converted to a huge size_t and accepted (the original compared an
  // unsigned size, so ftell() == -1 would pass the minimum-size check).
  fseek(fp, 0, SEEK_END);
  long file_size = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  if (file_size < static_cast<long>(tinyexr::kEXRVersionSize)) {
    fclose(fp);  // BUG FIX: `fp` was leaked on this early-return path
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes every part of a multipart EXR held in memory into
// exr_images[0 .. num_parts-1]. The headers must come from
// ParseEXRMultipartHeaderFromMemory() so that header_len is set for each
// part; the per-part chunk offset tables are read from the region that
// follows the headers, then each part's chunks are decoded.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      // header_len == 0 means the header was not produced by the multipart
      // header parser.
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }
    total_header_size += exr_headers[i]->header_len;
  }
  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.
  marker += 1;  // Skip empty header.
  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+ : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf
  // Load chunk offset table.
  // NOTE(review): the 8-byte offset reads below advance `marker` without
  // re-checking it against memory + size; a truncated offset table could
  // over-read — TODO confirm how upstream bounds this.
  std::vector<tinyexr::OffsetData> chunk_offset_table_list;
  chunk_offset_table_list.reserve(num_parts);
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
    tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
    if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
      // Scanline (or single-level tiled) part: one flat offset table.
      tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
      std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];
      for (size_t c = 0; c < offset_table.size(); c++) {
        tinyexr::tinyexr_uint64 offset;
        memcpy(&offset, marker, 8);
        tinyexr::swap8(&offset);  // offsets are stored little-endian
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        offset_table[c] = offset + 4;  // +4 to skip 'part number'
        marker += 8;
      }
    } else {
      // Multi-level tiled part: one offset table per (level, dy, dx).
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
        if (num_blocks != exr_headers[i]->chunk_count) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
            tinyexr::tinyexr_uint64 offset;
            memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
            tinyexr::swap8(&offset);
            if (offset >= size) {
              tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                       err);
              return TINYEXR_ERROR_INVALID_DATA;
            }
            offset_data.offsets[l][dy][dx] = offset + 4;  // +4 to skip 'part number'
            marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
          }
        }
      }
    }
  }
  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];
    // First check 'part number' is identitical to 'i'
    for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
      for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
        for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
          const unsigned char *part_number_addr =
              memory + offset_data.offsets[l][dy][dx] - 4;  // -4 to move to 'part number' field.
          unsigned int part_no;
          memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
          tinyexr::swap4(&part_no);
          if (part_no != i) {
            tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                     err);
            return TINYEXR_ERROR_INVALID_DATA;
          }
        }
    std::string e;
    // Offsets stored above already point past the part-number field, so
    // DecodeChunk sees chunks in the same layout as a single-part file.
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
                                   memory, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }
  return TINYEXR_SUCCESS;
}
// File-based wrapper for LoadEXRMultipartImageFromMemory(): reads the
// whole file into memory and delegates.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // BUG FIX: `filename` was not validated; a NULL filename crashed in
  // std::string(filename)/fopen below. All sibling loaders check it.
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute the file size; reject ftell() failure and empty files
  // (&buf[0] on an empty vector below would be undefined behavior).
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (fsize <= 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("Empty or invalid file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(fsize);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    // BUG FIX: a short read was previously only caught by assert(), which
    // compiles out in release builds; report it as an error instead.
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// One-shot writer: saves an interleaved float image (1 = grayscale "A",
// 3 = RGB, 4 = RGBA) to an .exr file. Channels are written in (A)BGR
// order, which most EXR viewers expect. When save_as_fp16 > 0, pixels are
// stored as half floats. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_*
// code with a message in *err.
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // BUG FIX: reject NULL data/filename and non-positive dimensions up
  // front; the original crashed in memcpy()/.at(0) below for these inputs.
  if (data == NULL || outfilename == NULL || width <= 0 || height <= 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  // Compute the pixel count in size_t to avoid `int` overflow of
  // width * height for large images (the original multiplied as int).
  const size_t pixel_count =
      static_cast<size_t>(width) * static_cast<size_t>(height);

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(pixel_count);
    memcpy(images[0].data(), data, sizeof(float) * pixel_count);
  } else {
    images[0].resize(pixel_count);
    images[1].resize(pixel_count);
    images[2].resize(pixel_count);
    images[3].resize(pixel_count);

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < pixel_count; i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  // Plane pointers in (A)BGR order to match the channel names below.
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image
    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);

  // BUG FIX: release the header allocations on ALL paths. The original
  // returned early on failure, leaking channels, pixel_types, and
  // requested_pixel_types.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute RESULT = X - Y for two `struct timeval` values.
 *
 * NOTE: Y is normalized in place as part of the borrow/carry handling,
 * so the caller's Y may be modified by this call.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds from Y's seconds field so that the
       microsecond subtraction below cannot go negative. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= 1000000 * borrow;
    }

    /* Conversely, fold an excess of more than a second's worth of
       microseconds back into Y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }

    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* The whole difference is negative iff X's adjusted seconds are smaller. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver: order-1, 3D 7-point stencil with variable coefficients.
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; a 1-point halo is added
 * on each side. Runs TESTS sweeps and reports the minimum time.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;

    /* BUG FIX: Nx/Ny/Nz (and Nt) were read uninitialized when fewer than
       3 (resp. 4) command-line arguments were given -- undefined
       behavior. Provide sane defaults instead. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // allocate the arrays: A[2][Nz][Ny][Nx] (double-buffered in time)
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // coef[7][Nz][Ny][Nx]: one coefficient field per stencil point
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 32;
    tile_size[1] = 32;
    tile_size[2] = 16;
    tile_size[3] = 256;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    /* Initialize variables.
       BUG FIX: the original initialized only indices 1..N-1, but the
       stencil reads the i-1/j-1/k-1 boundary planes (index 0), and after
       the first sweep the boundary of the other time buffer is read too.
       Initialize ALL of both time buffers to avoid reading uninitialized
       memory. */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=0; i<Nz; i++) {
            for (j=0; j<Ny; j++) {
                for (k=0; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: was lowercase `min(...)`, which is undefined -- only
           the MIN macro is declared in this file. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (innermost rows first, then the spines).
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);          /* BUG FIX: top-level pointer was leaked */

    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);       /* BUG FIX: top-level pointer was leaked */
    free(tile_size);  /* BUG FIX: tile size list was leaked */

    (void) ts_return;
    (void) num_threads;

    return 0;
}
|
GB_binop__band_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__band_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__band_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int64)
// C=scalar+B GB (_bind1st__band_int64)
// C=scalar+B' GB (_bind1st_tran__band_int64)
// C=A+scalar GB (_bind2nd__band_int64)
// C=A'+scalar GB (_bind2nd_tran__band_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT64 || GxB_NO_BAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// The loop body lives in the included template, which expands the
// band_int64 operator macros (cij = aij & bij) defined above.
void GB (_Cdense_ewise3_noaccum__band_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with the
// band_int64 operator. B has been sliced into tasks via B_ek_slicing.
GrB_Info GB (_Cdense_accumB__band_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
GrB_Info GB (_Cdense_accumb__band_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to the scalar, typed as int64_t
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returned; kept
    // as-is because this file is auto-generated from Generator/*.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the band_int64
// operator (cij = aij & bij). For eWiseUnion, the alpha/beta scalars
// substitute for entries present in only one of A or B.
GrB_Info GB (_AaddB__band_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // scalars used only in the eWiseUnion case (otherwise left unset and
    // unused by the template)
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse or hypersparse, using the band_int64 operator.
GrB_Info GB (_AemultB_08__band_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. `flipxy` is only consulted when the operator has no
// flipped variant (GB_BINOP_FLIP); band is commutative, so it is ignored.
GrB_Info GB (_AemultB_02__band_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full. M has been sliced into tasks via M_ek_slicing.
GrB_Info GB (_AemultB_04__band_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__band_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x & Bx [p]: apply the operator with the scalar x bound as the
// first operand. Cx and Bx may be aliased (in-place apply). Bb is B's
// bitmap (NULL unless B is in bitmap format); bnz is the entry count.
GrB_Info GB (_bind1st__band_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in B's bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (aij & y) to every entry of A, with the scalar y bound as the
// second operand.  Cx and Ax may alias; Ab is A's bitmap (or NULL).
GrB_Info GB (_bind2nd__band_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t *Cx = (int64_t *) Cx_output ;
    const int64_t y = *((const int64_t *) y_input) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    { 
        // skip entries not present in the bitmap
        if (GBB (Ab, k))
        { 
            int64_t aij = GBX (Ax, k, false) ;
            Cx [k] = (aij) & (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to compute each entry of C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) & (aij) ; \
}

// C = op (x, A'): transpose A and apply the BAND operator with x bound first.
GrB_Info GB (_bind1st_tran__band_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    // the template provides the entire transpose-and-apply body
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // NOTE(review): this trailing redefinition is generated boilerplate that
    // restores GB_ATYPE after the template; it emits no executable code.
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to compute each entry of C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) & (y) ; \
}

// C = op (A', y): transpose A and apply the BAND operator with y bound second.
GrB_Info GB (_bind2nd_tran__band_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    // the template provides the entire transpose-and-apply body
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng, Hang Zhang
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"
#if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__)
#define MXNET_USE_MKL_DROPOUT 1
#endif
#if MXNET_USE_MKL_DROPOUT
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // MXNET_USE_MKL_DROPOUT
#define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7
namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
} // namespace dropout
namespace mxnet {
namespace op {
const int MAX_DIM = 5;
// Operator parameters for Dropout: drop probability, operating mode, axes on
// which the mask is shared (variational dropout), and a cuDNN kill-switch.
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;                         // fraction of the input dropped during training
  int mode;                        // dropout::kTraining or dropout::kAlways
  mxnet::TShape axes;              // axes along which one mask value is shared
  dmlc::optional<bool> cudnn_off;  // when true, never use the cuDNN path
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0))
    .describe("Axes for variational dropout kernel.");
    DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false))
    .describe("Whether to turn off cudnn in dropout operator. "
              "This option is ignored if axes is specified.");
  }
};  // struct DropoutParam
template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
// Fill r[0..n-1] with Bernoulli(p) samples using MKL VSL, splitting the work
// across OpenMP threads; each thread skips ahead in a shared-seed stream so
// the combined output is deterministic for a given seed.
// NOTE(review): gen is typed RandGenerator<cpu, ...> while Impl uses xpu —
// this only compiles on the CPU path (MKL), where the two coincide; confirm.
static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                              int n, double p, int* r) {
  // Derive a per-call seed from the framework RNG.
  typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
  const int seed = 17 + abs(genImpl.rand() % 4096);
  CHECK_GE(seed, 0);
  const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
  {
    const int ithr = omp_get_thread_num();
    const int avg_amount = (n + nthr - 1) / nthr;
    const int my_offset = ithr * avg_amount;
    const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
    if (my_amount > 0) {
      // Each thread owns one VSL stream, advanced past the chunks of the
      // threads before it so all threads draw from one logical sequence.
      VSLStreamStatePtr stream;
      vslNewStream(&stream, VSL_BRNG_MCG31, seed);
      vslSkipAheadStream(stream, my_offset);
      viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
      vslDeleteStream(&stream);
    }
  }
}
// Whether the MKL path can be used for this DType.
static inline bool MKLAvailable() {
  // BernoulliGenerate expects an array of int, so for types smaller than int
  // the mask buffer would be too small; we can't use MKL in those cases.
  return sizeof(DType) >= sizeof(int);
}
// MKL forward pass: generate an int Bernoulli mask, then compute
// out = data * mask / pkeep and store the scaled mask for the backward pass.
inline void MKLForward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<TBlob> &out_data) {
  Stream<xpu> *s = ctx.get_stream<xpu>();
  RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
  CHECK_NOTNULL(pgen);
  Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
  DType *outptr = out.dptr_;
  DType *dataptr = data.dptr_;
  // The int mask is written either directly into the mask blob (when DType
  // is at least int-sized) or into scratch space.
  auto maskptr = reinterpret_cast<int *>(mask.dptr_);
  // NOTE(review): int may overflow for tensors with > 2^31 elements — TODO confirm.
  int count = mask.shape_[0] * mask.shape_[1];
  if (sizeof(DType) > sizeof(int)) {
    // allocating new buffer to avoid memory overlap between `mask.dptr_` and `maskptr`
    Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s);
    maskptr = temp.dptr_;
  }
  BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
  const float pk_1 = 1.0f / this->pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int i = 0; i < count; ++i) {
    // mask carries the 1/pkeep scaling so backward is a plain multiply
    const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1;
    outptr[i] = dataptr[i] * maskVal;
    mask.dptr_[i] = maskVal;
  }
}
// MKL backward pass: grad_in = grad_out * mask (mask already holds 1/pkeep).
inline void MKLBackward(const OpContext &ctx,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &out_data,
                        const std::vector<TBlob> &out_grad) {
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
  DType *ingradptr = gdata.dptr_;
  const DType *outgradptr = grad.dptr_;
  const DType *maskptr = mask.dptr_;
  const int count = mask.shape_[0] * mask.shape_[1];
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int i = 0; i < count; ++i) {
    ingradptr[i] = outgradptr[i] * maskptr[i];
  }
}
#endif // #if MXNET_USE_MKL_DROPOUT
public:
/*!
* \brief Dropout kernel, compute dropout tensor
*/
struct DropoutKernel {
  /*!
   * \brief Dropout kernel function: draws one uniform sample per element,
   *        keeps the element when the sample is below pkeep, and stores the
   *        scaled (1/pkeep) mask so backward needs only a multiply.
   * \param id Thread number (0-based representing count)
   * \param gen Random number generator
   * \param N Total number of items in the output
   * \param step Step between items, related to parallelism
   * \param dropout_out Output dropout values
   * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
   * \param input_data Input data to perform the dropout on
   * \param pkeep Dropout rate (keep when the generated random number is less than this value)
   */
  MSHADOW_XINLINE static void Map(index_t id,
                                  RandGenerator<xpu, DType> gen,
                                  const index_t N,
                                  const index_t step,
                                  DType *dropout_out,
                                  DType *mask_out,
                                  const DType *input_data,
                                  const real_t pkeep) {
    RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
      const real_t rand_num = static_cast<real_t>(genImpl.uniform());
      // mask is 0 or 1/pkeep, so kept activations are rescaled in-place
      mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
      dropout_out[i] = input_data[i] * mask_out[i];
    });
  }
};
struct BernoulliKernel {
  /*! \brief Bernoulli kernel for generating the (scaled) mask only, used by
   *         the variational-dropout path where the mask is broadcast later. */
  MSHADOW_XINLINE static void Map(index_t id,
                                  RandGenerator<xpu, DType> gen,
                                  const index_t N,
                                  const index_t step,
                                  DType *mask_out,
                                  const real_t pkeep) {
    RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
      const real_t rand_num = static_cast<real_t>(genImpl.uniform());
      // mask is 0 or 1/pkeep
      mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
    });
  }
};
// Construct the operator state from parameters; pkeep_ = 1 - p.  On GPU,
// cuDNN descriptors are created eagerly whenever the cuDNN path may be used.
explicit DropoutOp(const DropoutParam &param, Context ctx) {
  this->pkeep_ = 1.0f - param.p;
  this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
  this->axes_ = param.axes;
  this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
  this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
  this->ctx_ = ctx;
  if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
    dtype_ = mshadow::DataType<DType>::kCudnnFlag;
    CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
    CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
    CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
    CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
    CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT
}
// Destroy cuDNN descriptors iff the constructor created them (same condition).
~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
  if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
    CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
    CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
    CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
    CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
    CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT
}
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// Whether the cuDNN implementation may be used for this call.
inline bool CuDNNAvailable() {
  return this->pkeep_ > 0 && !this->cudnn_off_;
}
// cuDNN forward: treat the tensor as a flat 1 x 1 x 1 x Size() NdTensor and
// run cudnnDropoutForward, using the mask blob as cuDNN reserve space.
inline void CuDNNForward(const OpContext &ctx,
                         const TBlob &in,
                         const TBlob &mask,
                         const TBlob &out) {
  Stream<xpu> *s = ctx.get_stream<xpu>();
  // set dropout state.
  ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_);
  // describe input/output tensor
  int dim[4], stride[4];
  dim[0] = 1;
  dim[1] = 1;
  dim[2] = 1;
  dim[3] = out.Size();
  stride[0] = out.Size();
  stride[1] = out.Size();
  stride[2] = out.Size();
  stride[3] = 1;
  CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_,
                                        dtype_,
                                        4,
                                        dim,
                                        stride));
  CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_,
                                        dtype_,
                                        4,
                                        dim,
                                        stride));
  // perform dropout with cudnn
  CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
  // cudnn uses bits to record the positions that are dropped, so reserve bytes is always
  // 1/8 of input size.
  CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) <<
      "The size of the mask space is smaller than the required cudnn reserved space.";
  CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
                                 dropout_desc_,
                                 x_desc_,
                                 in.dptr<DType>(),
                                 y_desc_,
                                 out.dptr<DType>(),
                                 mask.dptr<DType>(),
                                 dropout_reserve_byte_));
}
// cuDNN backward: mirror of CuDNNForward using the reserve space saved in
// the mask blob to undo the same drop pattern on the gradient.
inline void CuDNNBackward(const OpContext &ctx,
                          const TBlob &out_grad,
                          const TBlob &mask,
                          const TBlob &in_grad) {
  Stream<xpu> *s = ctx.get_stream<xpu>();
  // describe input/output tensor
  int dim[4], stride[4];
  dim[0] = 1;
  dim[1] = 1;
  dim[2] = 1;
  dim[3] = in_grad.Size();
  stride[0] = in_grad.Size();
  stride[1] = in_grad.Size();
  stride[2] = in_grad.Size();
  stride[3] = 1;
  CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_,
                                        dtype_,
                                        4,
                                        dim,
                                        stride));
  CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_,
                                        dtype_,
                                        4,
                                        dim,
                                        stride));
  // perform dropout with cudnn
  CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
                                  dropout_desc_,
                                  dy_desc_,
                                  out_grad.dptr<DType>(),
                                  dx_desc_,
                                  in_grad.dptr<DType>(),
                                  mask.dptr<DType>(),
                                  dropout_reserve_byte_));
}
#endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// Forward pass: out = in * mask with mask ~ Bernoulli(pkeep_)/pkeep_.
// Dispatch order for element-wise dropout: MKL, then cuDNN, then the generic
// RNG kernel.  With non-empty axes_, a reduced-shape mask is generated and
// broadcast-multiplied.  Falls back to an identity copy when dropout is off.
void Forward(const OpContext &ctx,
             const std::vector<TBlob> &in_data,
             const std::vector<OpReqType> &req,
             const std::vector<TBlob> &out_data) {
  this->dropout_passthrough_ = true;
  if (req[dropout::kOut] != kNullOp) {
    CHECK_EQ(in_data.size(), 1U);
    if (ctx.is_train) {
      CHECK_EQ(out_data.size(), 2U);
    }
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const TBlob &in = in_data[dropout::kData];
    const TBlob &out = out_data[dropout::kOut];
    const TBlob &mask = out_data[dropout::kMask];
    // Only drop when training (or mode == kAlways) and p > 0.
    if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
      this->dropout_passthrough_ = false;
      if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
        if (MKLAvailable()) {
          MKLForward(ctx, in_data, out_data);
          return;
        }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        if (CuDNNAvailable()) {
          CuDNNForward(ctx, in, mask, out);
          return;
        }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
        CHECK_NOTNULL(pgen);
        CHECK(req[dropout::kOut] != kAddTo);
        LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                      out.dptr<DType>(),
                                      mask.dptr<DType>(),
                                      in.dptr<DType>(),
                                      this->pkeep_);
        return;
      } else {
        // Variational dropout: mask generated on the reduced shape, then
        // broadcast-multiplied onto the input.
        RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
        CHECK_NOTNULL(pgen);
        // initialize the mask
        LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
                                        mask.dptr<DType>(),
                                        this->pkeep_);
        // broadcast mul
        mxnet::TShape new_lshape, new_rshape, new_oshape;
        int ndim = BinaryBroadcastShapeCompact(in.shape_,
                                               mask.shape_, out.shape_,
                                               &new_lshape, &new_rshape, &new_oshape);
        if (!ndim) {
          // shapes coincide after compaction: plain element-wise multiply
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                s, out.Size(), out.dptr<DType>(), in.dptr<DType>(),
                mask.dptr<DType>());
          });
        } else {
          BROADCAST_NDIM_SWITCH(ndim, NDim, {
            mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
            mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
            mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
            mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, DType,
                                                               mshadow_op::mul>, xpu>::
            template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
                              lstride, rstride, oshape,
                              in.dptr<DType>(),
                              mask.dptr<DType>(), out.dptr<DType>());
          });
        }
      }
    } else {
      // dropout disabled: identity copy (nothing to do for in-place writes)
      if (req[dropout::kOut] == kWriteInplace) return;
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
      });
    }
  }
}
// Backward pass: grad_in = grad_out * mask (the mask already carries the
// 1/pkeep scaling).  When the forward pass ran in pass-through mode, the
// gradient is copied through unchanged.  Dispatch mirrors Forward:
// MKL, then cuDNN, then the generic kernels.
void Backward(const OpContext &ctx,
              const std::vector<TBlob> &out_grad,
              const std::vector<TBlob> &out_data,
              const std::vector<OpReqType> &req,
              const std::vector<TBlob> &in_grad) {
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  if (!this->dropout_passthrough_) {
    this->dropout_passthrough_ = true;
    const TBlob &gdata = in_grad[dropout::kData];
    const TBlob &grad = out_grad[dropout::kOut];
    const TBlob &mask = out_data[dropout::kMask];
    if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
      if (MKLAvailable()) {
        MKLBackward(ctx, in_grad, out_data, out_grad);
        return;
      }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
      if (CuDNNAvailable()) {
        CuDNNBackward(ctx, grad, mask, gdata);
        return;
      }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
      // standard case for dropout
      CHECK_EQ(grad.Size(), mask.Size());
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
      });
      return;
    } else {
      // broadcast mul (variational dropout with shared axes)
      mxnet::TShape new_lshape, new_rshape, new_oshape;
      int ndim = BinaryBroadcastShapeCompact(grad.shape_,
                                             mask.shape_, gdata.shape_,
                                             &new_lshape, &new_rshape, &new_oshape);
      if (!ndim) {
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
              s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
      } else {
        BROADCAST_NDIM_SWITCH(ndim, NDim, {
          mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
          mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
          mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
          // Use req[dropout::kData] (not a literal req[0]) for consistency
          // with the non-broadcast path; kData == 0 so behavior is unchanged.
          mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, DType,
                                                             mshadow_op::mul>, xpu>::
          template LaunchEx(s, new_oshape.Size(), req[dropout::kData],
                            lstride, rstride, oshape,
                            grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
        });
      }
    }
  } else {
    // pass-through: gradient flows unchanged
    const TBlob& gdata = in_grad[dropout::kData];
    const TBlob& grad = out_grad[dropout::kOut];
    MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
      mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
          s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
    });
  }
}
private:
/*! \brief Dropout rate (keep when the generated random number is less than this value) */
real_t pkeep_;
/*! \brief Dropout mode */
dropout::DropoutOpMode mode_;
/*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
mxnet::TShape axes_;
/*! \brief Flag to record whether forward is executed in pass-through mode */
bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
bool cudnn_off_;
Context ctx_;
cudnnDataType_t dtype_;
cudnnDropoutDescriptor_t dropout_desc_;
uint64_t seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
size_t dropout_reserve_byte_;
cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif // MXNET_USE_CUDNN_DROPOUT
}; // class DropoutOp
// Stateful forward entry point: dispatch on the input dtype and delegate to
// DropoutOp<xpu, DType>::Forward.
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Forward(ctx, inputs, req, outputs);
  });
}
// Stateful backward entry point.  inputs[0] is the output gradient and
// inputs[1] is the saved mask; they are repacked into the slot layout that
// DropoutOp::Backward expects (kOut / kMask).
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1);
  CHECK_EQ(req.size(), 1);
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];
  out_data[dropout::kMask] = inputs[1];
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
snmp_fmt_plug.c | /* Cracker for SNMPv3 USM hashes, https://tools.ietf.org/html/rfc3414.
*
* This software is Copyright (c) 2017, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Thanks to https://www.0x0ff.info/2013/snmpv3-authentification/ for the very
* clear explanation of the algorithms involved in SNMPv3 USM.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_snmp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_snmp);
#else
#include <string.h>
#include <stdint.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "formats.h"
#include "md5.h"
#include "hmacmd5.h"
#include "sha.h"
#include "hmac_sha.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "SNMP"
#define FORMAT_NAME "SNMPv3 USM"
#define FORMAT_TAG "$SNMPv3$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "HMAC-MD5-96/HMAC-SHA1-96 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_SALT_LEN 1500
static struct fmt_tests tests[] = {
// https://wiki.wireshark.org/SampleCaptures, snmp_usm.pcap, pippo, md5
{"$SNMPv3$1$3$3081b10201033011020430f6f3d5020300ffe304010702010304373035040d80001f888059dc486145a2632202010802020ab90405706970706f040c00000000000000000000000004080000000103d5321a0460826ecf6443956d4c364bfc6f6ffc8ee0df000ffd0955af12d2c0f3c60fadea417d2bb80c0b2c1fa7a46ce44f9f16e15ee830a49881f60ecfa757d2f04000eb39a94058121d88ca20eeef4e6bf06784c67c15f144915d9bc2c6a0461da92a4abe$80001f888059dc486145a26322$19395e67894fda182414849f", "pippoxxx"},
// https://wiki.wireshark.org/SampleCaptures, snmp_usm.pcap, pippo, same as above but with missing algorithm specifier (0 instead of 1)
{"$SNMPv3$0$3$3081b10201033011020430f6f3d5020300ffe304010702010304373035040d80001f888059dc486145a2632202010802020ab90405706970706f040c00000000000000000000000004080000000103d5321a0460826ecf6443956d4c364bfc6f6ffc8ee0df000ffd0955af12d2c0f3c60fadea417d2bb80c0b2c1fa7a46ce44f9f16e15ee830a49881f60ecfa757d2f04000eb39a94058121d88ca20eeef4e6bf06784c67c15f144915d9bc2c6a0461da92a4abe$80001f888059dc486145a26322$19395e67894fda182414849f", "pippoxxx"},
// https://wiki.wireshark.org/SampleCaptures, snmp_usm.pcap, pippo3, sha1
{"$SNMPv3$2$76$30820144020103301102043cdca370020300ffe304010302010304383036040d80001f888059dc486145a2632202010802020aba0406706970706f33040c0000000000000000000000000408f9a7cd5639adc7de0481f12d4e0febddef162199aa61bb97f44b84d975d9cef001d31eed660a193c22362c2ba6d203932822baa6c5d0032cc5cd7a8b7ac7b2fc005820ea72d72ffe59d3696be2bc8d5bdffb2de6fc775ed26cbf2d49a513704867665126775b8ffcaf3c07c19f9ecefb20293af7a6beecb6a5f2e3ba812ed9d71d21679007546f3acc6b72aff2baff2688451e74434dc9e6dab2f1b5e149691ced9fb4283fc8f85e3e7ebbe833353076fbdea7a11bc13a8c5ea62385b519e8bd2ab15f646572f487c8eb471eb0b069c5cc500eb8abc0227746d4ee8a5d9f0d6bfd9ece27f3f99ad5937c3e9be08e3074963796d3a13907fa1f17d213$80001f888059dc486145a26322$3de2a23a91ef278f8277b3f5", "pippoxxx"},
// https://www.0x0ff.info/2013/snmpv3-authentification/
{"$SNMPv3$1$0$30818002010330110204580b8cc7020300ffe30401050201030431302f041180001f888062dc7f4c15465c510000000002010302017c040475736572040c00000000000000000000000004003035041180001f888062dc7f4c15465c51000000000400a11e0204334304ff0201000201003010300e060a2b06010201041e0105010500$80001f888062dc7f4c15465c5100000000$9b1b71e33603a30c125f095d", "useruseruser"},
// UTF-8 password
{"$SNMPv3$1$4$3081a30201033011020416396d42020300ffe304010302010304393037041180001f88804883c95f7803fa580000000002010102016904046c756c75040c00000000000000000000000004080000000166c4ecb40450cee8d8c70a64bc0b508bb2a5625f9916a35a4c1f2d1a4d436c02312edad700a1a21bb23c319b073ed8b2a84d3829961e87af1a30daa443f7408dcc0dbee952b8fb0eab20760488908f31047b31caefba$80001f88804883c95f7803fa5800000000$9fa2a2e12cff0ca34794e988", "1234567£"},
// SNMPv3 over IPv6
{"$SNMPv3$0$4$3081a302010330110204551e91ab020300ffcf04010302010304393037041180001f88804883c95f7803fa580000000002010202015404046c756c75040c00000000000000000000000004080000000296c59db40450b0228ff64c7311310b1c41e63b999087495bb482700f40646ec63e461490ff985436cc8dfd63ed0bc1e66b307eab019bdb406e27df3c175eecbf82504639694efd38e4eff6bd91c524443a962fb331e8$80001f88804883c95f7803fa5800000000$af477d4cc2e0d31e9340acf9", "1234567£"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];	// candidate passwords
static int *cracked, cracked_count;	// per-candidate crack flags

// One parsed "$SNMPv3$..." input hash.
static struct custom_salt {
	uint32_t authProtocol;	// 0 = unknown (try both), 1 = HMAC-MD5-96, 2 = HMAC-SHA1-96
	unsigned char salt[MAX_SALT_LEN];	// wholeMsg (the authenticated packet bytes)
	uint32_t salt_length;
	unsigned char engineID[32]; // has to be in between 5 and 32 (both inclusive)
	uint32_t engineLength;
	unsigned char msgAuthenticationParameters[12];	// expected truncated HMAC
} *cur_salt;
// Format initialization: scale key counts for OpenMP and allocate the
// candidate-password and result buffers.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	// min scales with thread count, max additionally with OMP_SCALE
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}
// Release the buffers allocated by init().
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
// Validate one "$SNMPv3$" ciphertext line:
//   $SNMPv3$<alg 0|1|2>$<pkt#>$<wholeMsg hex>$<engineID hex>$<12-byte HMAC hex>
// Returns 1 when well-formed, 0 otherwise.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int value, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL)	// algorithm
		goto err;
	if (!isdec(p))
		goto err;
	value = atoi(p);
	if (value != 0 && value != 1 && value != 2)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	// packet number, for debugging
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	// salt (wholeMsg)
		goto err;
	if (hexlenl(p, &extra) > MAX_SALT_LEN * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	// msgAuthoritativeEngineID / snmpEngineID
		goto err;
	// RFC 3411 requires snmpEngineID to be 5..32 octets; the original code
	// only checked the upper bound, letting truncated/empty IDs through.
	value = hexlenl(p, &extra);
	if (value < 5 * 2 || value > 32 * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	// msgAuthenticationParameters (hash)
		goto err;
	if (hexlenl(p, &extra) != 12 * 2 || extra)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
// Parse a ciphertext (already accepted by valid()) into a custom_salt.
// Returns a pointer to a static buffer, as John's salt API expects.
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	cs.authProtocol = atoi(p);
	p = strtokm(NULL, "$");	// packet number, unused here
	p = strtokm(NULL, "$");
	// hex-decode the wholeMsg (lengths were bounds-checked in valid())
	cs.salt_length = strlen(p) / 2;
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	p = strtokm(NULL, "$");
	// hex-decode the engine ID
	cs.engineLength = strlen(p) / 2;
	for (i = 0; i < cs.engineLength; i++)
		cs.engineID[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	p = strtokm(NULL, "$");
	// hex-decode the 12-byte truncated HMAC we must match
	for (i = 0; i < 12; i++)
		cs.msgAuthenticationParameters[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	MEM_FREE(keeptr);
	return &cs;
}
// Select the salt that subsequent crypt_all() calls will use.
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Password to Key Sample Code for MD5, from RFC 3414 A.2.1 and Wireshark.
 * Expands the password to 1MB by repetition, hashes it with MD5, then
 * localizes the digest with the engineID (engineLength <= 32, enforced by
 * valid()).  Writes the 16-byte localized key into `key`. */
static void snmp_usm_password_to_key_md5(const uint8_t *password, uint32_t
		passwordlen, const uint8_t *engineID, uint32_t engineLength,
		uint8_t *key)
{
	uint8_t *cp, password_buf[64];
	uint32_t password_index = 0;
	uint32_t count = 0, i;
	MD5_CTX ctx;

	MD5_Init(&ctx);
	/**********************************************/
	/* Use while loop until we've done 1 Megabyte */
	/**********************************************/
	while (count < 1048576) {
		cp = password_buf;
		if (passwordlen != 0) {
			for (i = 0; i < 64; i++) {
				/*************************************************/
				/* Take the next octet of the password, wrapping */
				/* to the beginning of the password as necessary.*/
				/*************************************************/
				*cp++ = password[password_index++ % passwordlen];
			}
		} else {
			/* Empty password: hash a block of zeros.  The original
			 * sample code wrote only the first byte, so MD5_Update
			 * read 63 uninitialized bytes (undefined behavior and a
			 * nondeterministic key). */
			memset(password_buf, 0, sizeof(password_buf));
		}
		MD5_Update(&ctx, password_buf, 64);
		count += 64;
	}
	MD5_Final(key, &ctx);

	/*****************************************************/
	/* Now localize the key with the engineID and pass   */
	/* through MD5 to produce final key                  */
	/* May want to ensure that engineLength <= 32,       */
	/* otherwise need to use a buffer larger than 64     */
	/*****************************************************/
	memcpy(password_buf, key, 16);
	memcpy(password_buf+16, engineID, engineLength);
	memcpy(password_buf+16+engineLength, key, 16);

	MD5_Init(&ctx);
	MD5_Update(&ctx, password_buf, 32+engineLength);
	MD5_Final(key, &ctx);
}
/* Password to Key Sample Code for SHA, from RFC 3414 A.2.2 and Wireshark.
 * Same scheme as the MD5 variant, with a 20-byte SHA-1 digest and a 72-byte
 * localization buffer (engineLength <= 32 enforced by valid()). */
static void snmp_usm_password_to_key_sha(const uint8_t *password, uint32_t
		passwordlen, const uint8_t *engineID, uint32_t engineLength,
		uint8_t *key)
{
	uint8_t *cp, password_buf[72];
	uint32_t password_index = 0;
	uint32_t count = 0, i;
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	/**********************************************/
	/* Use while loop until we've done 1 Megabyte */
	/**********************************************/
	while (count < 1048576) {
		cp = password_buf;
		if (passwordlen != 0) {
			for (i = 0; i < 64; i++) {
				/*************************************************/
				/* Take the next octet of the password, wrapping */
				/* to the beginning of the password as necessary.*/
				/*************************************************/
				*cp++ = password[password_index++ % passwordlen];
			}
		} else {
			/* Empty password: hash a block of zeros.  The original
			 * sample code wrote only the first byte, so SHA1_Update
			 * read 63 uninitialized bytes (undefined behavior and a
			 * nondeterministic key). */
			memset(password_buf, 0, 64);
		}
		SHA1_Update(&ctx, password_buf, 64);
		count += 64;
	}
	SHA1_Final(key, &ctx);

	/*****************************************************/
	/* Now localize the key with the engineID and pass   */
	/* through SHA to produce final key                  */
	/* May want to ensure that engineLength <= 32,       */
	/* otherwise need to use a buffer larger than 72     */
	/*****************************************************/
	memcpy(password_buf, key, 20);
	memcpy(password_buf+20, engineID, engineLength);
	memcpy(password_buf+20+engineLength, key, 20);

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, password_buf, 40+engineLength);
	SHA1_Final(key, &ctx);
}
// Try every queued candidate password against the current salt: derive the
// localized key, HMAC the captured wholeMsg, and compare the first 12 bytes
// with msgAuthenticationParameters.  authProtocol 0 means "unknown" and
// tries MD5 first, then SHA-1.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	memset(cracked, 0, sizeof(cracked[0])*cracked_count);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		HMACMD5Context ctx;
		unsigned char authKey[20];	// localized key: 16 bytes (MD5) or 20 (SHA-1)
		unsigned char out[20];		// HMAC output; only the first 12 bytes are compared

		if (cur_salt->authProtocol == 1) {	// HMAC-MD5-96
			snmp_usm_password_to_key_md5((const uint8_t *)saved_key[index],
					strlen(saved_key[index]),
					cur_salt->engineID,
					cur_salt->engineLength, authKey);
			hmac_md5_init_rfc2104(authKey, 16, &ctx);
			hmac_md5_update(cur_salt->salt, cur_salt->salt_length, &ctx);
			hmac_md5_final(out, &ctx);
			if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)
				cracked[index] = 1;
			else
				cracked[index] = 0;
		} else if (cur_salt->authProtocol == 2) {	// HMAC-SHA1-96
			snmp_usm_password_to_key_sha((const uint8_t *)saved_key[index],
					strlen(saved_key[index]),
					cur_salt->engineID,
					cur_salt->engineLength, authKey);
			hmac_sha1(authKey, 20, cur_salt->salt, cur_salt->salt_length, out, 12);
			if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)
				cracked[index] = 1;
			else
				cracked[index] = 0;
		} else if (cur_salt->authProtocol == 0) {	// unknown: try both
			cracked[index] = 0;
			snmp_usm_password_to_key_md5((const uint8_t *)saved_key[index],
					strlen(saved_key[index]),
					cur_salt->engineID,
					cur_salt->engineLength, authKey);
			hmac_md5_init_rfc2104(authKey, 16, &ctx);
			hmac_md5_update(cur_salt->salt, cur_salt->salt_length, &ctx);
			hmac_md5_final(out, &ctx);
			if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0) {
				cracked[index] = 1;
				continue;	// MD5 matched; skip the SHA-1 attempt
			}
			snmp_usm_password_to_key_sha((const uint8_t *)saved_key[index],
					strlen(saved_key[index]),
					cur_salt->engineID,
					cur_salt->engineLength, authKey);
			hmac_sha1(authKey, 20, cur_salt->salt, cur_salt->salt_length, out, 12);
			if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)
				cracked[index] = 1;
		}
	}

	return count;
}
// Did any candidate in this batch crack the hash?
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}
// Did this specific candidate crack the hash?
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/*
 * Nothing further to verify: a 12-byte HMAC match in crypt_all() is
 * treated as final for this format.
 */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void snmp_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Returns the stored plaintext candidate for the given batch slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * John the Ripper format descriptor wiring this SNMP cracker's
 * callbacks and parameters into the core. Field order follows
 * struct fmt_main / fmt_params / fmt_methods in formats.h.
 */
struct fmt_main fmt_snmp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,			/* minimum plaintext length — presumably; confirm against formats.h */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },	/* no tunable cost parameters */
		{ FORMAT_TAG },	/* ciphertext signature(s) */
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,	/* this format compares via cracked[], not binaries */
		get_salt,
		{ NULL },	/* no tunable cost extraction */
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,		/* no salt_compare */
		set_salt,
		snmp_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
simple_mortar_mapper_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_SIMPLE_MORTAR_MAPPER_PROCESS)
#define KRATOS_SIMPLE_MORTAR_MAPPER_PROCESS
// System includes
#include <unordered_map>
// External includes
// Project includes
#include "processes/process.h"
#include "includes/kratos_parameters.h"
#include "includes/model_part.h"
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
/* Custom includes */
#include "includes/mortar_classes.h"
/* Custom utilities */
#include "utilities/exact_mortar_segmentation_utility.h"
/* Tree structures */
// #include "spatial_containers/bounding_volume_tree.h" // k-DOP
#include "spatial_containers/spatial_containers.h" // kd-tree
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
/// The definition of the size type
typedef std::size_t SizeType;
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @ingroup KratosCore
* @class PointMapper
* @brief Custom Point container to be used by the mapper
* @details The main difference with this point and the base one is that it contains the pointer to geometrical object where the center of the points belongs
* @author Vicente Mataix Ferrandiz
*/
class PointMapper
: public Point
{
public:
///@name Type Definitions
///@{
typedef Point BaseType;
/// Counted pointer of PointMapper
KRATOS_CLASS_POINTER_DEFINITION( PointMapper );
///@}
///@name Life Cycle
///@{
/// Default constructors
PointMapper():
BaseType(),
mpOriginGeometricalObject(nullptr)
{}
PointMapper(const array_1d<double, 3>& Coords)
:BaseType(Coords),
mpOriginGeometricalObject(nullptr)
{}
PointMapper(GeometricalObject::Pointer pGeometricalObject):
mpOriginGeometricalObject(pGeometricalObject)
{
UpdatePoint();
}
PointMapper(
const array_1d<double, 3>& Coords,
GeometricalObject::Pointer pGeometricalObject
):
BaseType(Coords),
mpOriginGeometricalObject(pGeometricalObject)
{}
///Copy constructor (not really required)
PointMapper(const PointMapper& rhs):
BaseType(rhs),
mpOriginGeometricalObject(rhs.mpOriginGeometricalObject)
{
}
/// Destructor.
~PointMapper() override= default;
///@}
///@name Operations
///@{
/**
* @brief Returns the point
* @return The point
*/
BaseType GetPoint()
{
BaseType Point(this->Coordinates());
return Point;
}
/**
* @brief Set the point
* @param Point The point
*/
void SetPoint(const BaseType Point)
{
this->Coordinates() = Point.Coordinates();
}
/**
* @brief Sets the geometrical object associated to the point
* @param pGeometricalObject The pointer to the geometrical object
*/
void SetCondition(GeometricalObject::Pointer pGeometricalObject)
{
mpOriginGeometricalObject = pGeometricalObject;
}
/**
* @brief Returns the geometrical object associated to the point
* @return mpOriginGeometricalObject The pointer to the geometrical object associated to the point
*/
GeometricalObject::Pointer GetGeometricalObject()
{
KRATOS_DEBUG_ERROR_IF(mpOriginGeometricalObject.get() == nullptr) << "GeometricalObject no initialized in the PointMapper class" << std::endl;
return mpOriginGeometricalObject;
}
/**
* @brief This method checks everything is right
*/
void Check()
{
KRATOS_TRY;
auto aux_coord = Kratos::make_shared<array_1d<double, 3>>(this->Coordinates());
KRATOS_ERROR_IF(!aux_coord) << "Coordinates no initialized in the PointMapper class" << std::endl;
KRATOS_ERROR_IF(mpOriginGeometricalObject->use_count() == 0) << "GeometricalObject no initialized in the PointMapper class" << std::endl;
KRATOS_CATCH("");
}
/**
* @brief This function updates the database, using as base for the coordinates the geometrical object center
*/
void UpdatePoint()
{
#ifdef KRATOS_USE_AMATRIX // This macro definition is for the migration period and to be removed afterward please do not use it
this->Coordinates() = mpOriginGeometricalObject->GetGeometry().Center().Coordinates();
#else
noalias(this->Coordinates()) = mpOriginGeometricalObject->GetGeometry().Center().Coordinates();
#endif // ifdef KRATOS_USE_AMATRIX
}
private:
///@name Member Variables
///@{
GeometricalObject::Pointer mpOriginGeometricalObject; /// GeometricalObject pointer
///@}
}; // Class PointMapper
/**
* @ingroup KratosCore
* @class SimpleMortarMapperProcess
 * @brief This is a basic mapper of values between domains using a mortar formulation
 * @details Using the dual mortar formulation the resolution of the system of equations is not needed.
 * Several types of constructors are available depending on the needs.
 * If the pair sets are not provided, a search will be performed using a KDTree
* @author Vicente Mataix Ferrandiz
* @tparam TDim The dimension of work
* @tparam TNumNodes The number of nodes of the slave
* @tparam TNumNodesMaster The number of nodes of the master
*/
template< const SizeType TDim, const SizeType TNumNodes, class TVarType, const SizeType TNumNodesMaster = TNumNodes>
class KRATOS_API(KRATOS_CORE) SimpleMortarMapperProcess
    : public Process
{
public:
    ///@name Type Definitions
    ///@{

    // DEFINITION OF FLAGS TO CONTROL THE BEHAVIOUR
    KRATOS_DEFINE_LOCAL_FLAG(AVERAGE_NORMAL);                     /// If using average normal
    KRATOS_DEFINE_LOCAL_FLAG(DISCONTINOUS_INTERFACE);             /// If the interface is discontinuous
    KRATOS_DEFINE_LOCAL_FLAG(ORIGIN_IS_HISTORICAL);               /// If the origin variable is historical
    KRATOS_DEFINE_LOCAL_FLAG(DESTINATION_IS_HISTORICAL);          /// If the destination variable is historical
    KRATOS_DEFINE_LOCAL_FLAG(ORIGIN_SKIN_IS_CONDITION_BASED);     /// If the entities to take into account on the origin model part are the conditions, otherwise we will take elements into consideration
    KRATOS_DEFINE_LOCAL_FLAG(DESTINATION_SKIN_IS_CONDITION_BASED); /// If the entities to take into account on the destination model part are the conditions, otherwise we will take elements into consideration

    /// Pointer definition of SimpleMortarMapperProcess
    KRATOS_CLASS_POINTER_DEFINITION(SimpleMortarMapperProcess);

    typedef Point PointType;
    typedef Node<3> NodeType;
    typedef Geometry<NodeType> GeometryType;
    typedef Geometry<PointType> GeometryPointType;

    /// Type definition for integration methods
    typedef GeometryData::IntegrationMethod IntegrationMethod;

    /// Auxiliary geometries used for the triangle/line decomposition of the intersections
    typedef Line2D2<PointType> LineType;
    typedef Triangle3D3<PointType> TriangleType;
    typedef typename std::conditional<TDim == 2, LineType, TriangleType >::type DecompositionType;

    /// Component type
    typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > component_type;

    /// Linear solver
    typedef UblasSpace<double, CompressedMatrix, Vector> SparseSpaceType;
    typedef UblasSpace<double, Matrix, Vector> LocalSpaceType;
    typedef typename SparseSpaceType::MatrixType MatrixType;
    typedef typename SparseSpaceType::VectorType VectorType;
    typedef LinearSolver<SparseSpaceType, LocalSpaceType > LinearSolverType;

    /// Component type
    typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > ComponentType;

    /// Index type definition
    typedef std::size_t IndexType;

    /// A map for integers
    typedef std::unordered_map<IndexType, IndexType> IntMap;

    /// BoundedMatrix
    typedef BoundedMatrix<double, TNumNodes, TNumNodes> BoundedMatrixType;

    // Type definitions for the tree
    typedef PointMapper PointMapperType;
    typedef PointMapperType::Pointer PointTypePointer;
    typedef std::vector<PointTypePointer> PointVector;
    typedef PointVector::iterator PointIterator;
    typedef std::vector<double> DistanceVector;
    typedef DistanceVector::iterator DistanceIterator;

    // KDtree definitions
    typedef Bucket< 3ul, PointMapperType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType;
    typedef Tree< KDTreePartition<BucketType> > KDTreeType;

    /// Mortar definition
    typedef MortarKinematicVariables<TNumNodes, TNumNodesMaster> MortarKinematicVariablesType;
    typedef MortarOperator<TNumNodes, TNumNodesMaster> MortarOperatorType;
    typedef DualLagrangeMultiplierOperators<TNumNodes, TNumNodesMaster> DualLagrangeMultiplierOperatorsType;
    typedef ExactMortarIntegrationUtility<TDim, TNumNodes, false, TNumNodesMaster> ExactMortarIntegrationUtilityType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor
     * @param rOriginModelPart The origin model part to compute
     * @param rDestinationModelPart The destination model part to compute
     * @param ThisParameters The configuration parameters
     * @param pThisLinearSolver The pointer to the linear solver to be used (in case of implicit resolution)
     */
    SimpleMortarMapperProcess(
        ModelPart& rOriginModelPart,
        ModelPart& rDestinationModelPart,
        Parameters ThisParameters = Parameters(R"({})" ),
        LinearSolverType::Pointer pThisLinearSolver = nullptr
        );

    /**
     * @brief Constructor using a single variable transferred between both model parts
     * @param rOriginModelPart The origin model part to compute
     * @param rDestinationModelPart The destination model part to compute
     * @param rThisVariable The variable to transfer and be transferred
     * @param ThisParameters The configuration parameters
     * @param pThisLinearSolver The pointer to the linear solver to be used (in case of implicit resolution)
     */
    SimpleMortarMapperProcess(
        ModelPart& rOriginModelPart,
        ModelPart& rDestinationModelPart,
        TVarType& rThisVariable,
        Parameters ThisParameters = Parameters(R"({})" ),
        LinearSolverType::Pointer pThisLinearSolver = nullptr
        );

    /**
     * @brief A constructor where two different variables can be considered for each subdomain
     * @param rOriginModelPart The origin model part to compute
     * @param rDestinationModelPart The destination model part to compute
     * @param rOriginVariable The variable to transfer
     * @param rDestinationVariable The variable to be transferred
     * @param ThisParameters The configuration parameters
     * @param pThisLinearSolver The pointer to the linear solver to be used (in case of implicit resolution)
     */
    SimpleMortarMapperProcess(
        ModelPart& rOriginModelPart,
        ModelPart& rDestinationModelPart,
        TVarType& rOriginVariable,
        TVarType& rDestinationVariable,
        Parameters ThisParameters = Parameters(R"({})" ),
        LinearSolverType::Pointer pThisLinearSolver = nullptr
        );

    /// Destructor.
    ~SimpleMortarMapperProcess() override = default;

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}
    ///@name Operators
    ///@{

    /// Function-call operator: forwards to Execute()
    void operator()()
    {
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Execute method is used to execute the Process algorithms.
     */
    void Execute() override;

    /**
     * @details This function will be executed at every time step BEFORE performing the solve phase
     */
    void ExecuteInitializeSolutionStep() override;

    /**
     * @brief This method is a direct map between the origin and destination modelpart with custom variables
     * @param rOriginVariable The variable on the origin model part
     * @param rDestinationVariable The variable on the destination model part
     * @param Flag The flags to special settings. Right now does nothing
     */
    void Map(
        TVarType& rOriginVariable,
        TVarType& rDestinationVariable,
        const Flags Flag = Flags()
        )
    {
        // Reassign the variables
        mpOriginVariable = &rOriginVariable;
        mpDestinationVariable = &rDestinationVariable;

        // Execute the process
        Execute();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "SimpleMortarMapperProcess";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "SimpleMortarMapperProcess";
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ModelPart& mOriginModelPart;                  /// The origin model part to compute
    ModelPart& mDestinationModelPart;             /// The destination model part to compute
    const TVarType* mpOriginVariable;             /// The origin variable to map
    const TVarType* mpDestinationVariable;        /// The destination variable to map
    double mMappingCoefficient = 1.0;             /// The mapping coefficient
    Flags mOptions;                               /// Local flags
    unsigned int mEchoLevel = 0;                  /// The verbosity level
    Parameters mThisParameters;                   /// The configuration parameters
    LinearSolverType::Pointer mpThisLinearSolver; /// The linear solver used to compute the solution

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief Check if the pairs have been created; performs the KD-tree search otherwise
     */
    void CheckAndPerformSearch();

    /**
     * @brief This method resets the nodal area
     */
    void ResetNodalArea();

    /**
     * @brief This method gets the max area of the conditions from the modelpart
     */
    double GetReferenceArea();

    /**
     * @brief This method assembles locally the mortar operators
     * @param rGeometricalObjectsPointSlave The list of points that form the triangle decomposition
     * @param rSlaveGeometry The slave geometry
     * @param rMasterGeometry The master geometry
     * @param rMasterNormal The normal vector of the master geometry
     * @param rThisKinematicVariables The kinematic variables of the geometries, needed to integrate the mortar operators
     * @param rThisMortarOperators The mortar operators
     * @param rThisIntegrationMethod The integration method used, determines the integration order
     * @param Ae The dual lagrange multiplier operator
     */
    void AssemblyMortarOperators(
        const std::vector<array_1d<PointType,TDim>>& rGeometricalObjectsPointSlave,
        const GeometryType& rSlaveGeometry,
        const GeometryType& rMasterGeometry,
        const array_1d<double, 3>& rMasterNormal,
        MortarKinematicVariablesType& rThisKinematicVariables,
        MortarOperatorType& rThisMortarOperators,
        const IntegrationMethod& rThisIntegrationMethod,
        const BoundedMatrixType Ae = IdentityMatrix(TNumNodes)
        );

    /**
     * @brief This method computes the Ae matrix
     * @param rSlaveGeometry The slave geometry
     * @param rThisKinematicVariables The kinematic variables
     * @param rConditionsPointsSlave The list of decomposed triangles
     * @param rThisIntegrationMethod The integration method considered
     * @return Ae: The matrix of dual LM
     */
    static inline BoundedMatrixType CalculateAe(
        GeometryType& rSlaveGeometry,
        MortarKinematicVariablesType& rThisKinematicVariables,
        std::vector<array_1d<PointType,TDim>>& rConditionsPointsSlave,
        const IntegrationMethod& rThisIntegrationMethod
        );

    /**
     * @brief This method inverts a diagonal matrix
     * @param rInputMatrix The matrix to invert
     * @return The matrix inverted
     */
    static inline BoundedMatrixType InvertDiagonalMatrix(const BoundedMatrixType& rInputMatrix);

    /**
     * @brief This method inverts a diagonal matrix
     * @param rInputMatrix The matrix to invert
     * @param rInvertedMatrix The matrix inverted
     */
    static inline void InvertDiagonalMatrix(
        const BoundedMatrixType& rInputMatrix,
        BoundedMatrixType& rInvertedMatrix
        );

    /**
     * @brief This method lumps a matrix
     * @param rInputMatrix The matrix to lump
     */
    void LumpMatrix(BoundedMatrixType& rInputMatrix);

    /**
     * @brief This method computes the size of the system
     * @param rSizeSystem The size of the system
     */
    void GetSystemSize(SizeType& rSizeSystem);

    /**
     * @brief This method creates a slave database needed to assemble the system
     * @param rSizeSystem The size of the system
     * @param rConectivityDatabase The database that will be used to assemble the system
     * @param rInverseConectivityDatabase The inverse database that will be used to assemble the system
     */
    void CreateSlaveConectivityDatabase(
        SizeType& rSizeSystem,
        IntMap& rConectivityDatabase,
        IntMap& rInverseConectivityDatabase
        );

    /**
     * @brief This method returns the corresponding integration order considered
     * @return The integration order considered
     */
    IntegrationMethod GetIntegrationMethod();

    /**
     * @brief This method checks if all components of a vector are true
     * @param rVectorToCheck The vector to check
     * @return result True if all components are true
     */
    bool CheckWholeVector(std::vector<bool>& rVectorToCheck);

    /**
     * @brief This method computes the residual matrix of the mapping
     * @param rResidualMatrix The matrix containing the residual of the mapping
     * @param rSlaveGeometry The slave geometry
     * @param rMasterGeometry The master geometry
     * @param rThisMortarOperators The mortar operators
     */
    void ComputeResidualMatrix(
        Matrix& rResidualMatrix,
        const GeometryType& rSlaveGeometry,
        const GeometryType& rMasterGeometry,
        const MortarOperatorType& rThisMortarOperators
        );

    /**
     * @brief This method assembles the LHS and the RHS
     * @param rA The LHS of the system
     * @param rb The RHS of the system
     * @param VariableSize The size of the variable
     * @param rResidualMatrix The matrix containing the residual of the mapping
     * @param rSlaveGeometry The slave geometry
     * @param rInverseConectivityDatabase The inverse database that will be used to assemble the system
     * @param rThisMortarOperators The mortar operators
     */
    void AssembleRHSAndLHS(
        MatrixType& rA,
        std::vector<VectorType>& rb,
        const SizeType VariableSize,
        const Matrix& rResidualMatrix,
        const GeometryType& rSlaveGeometry,
        IntMap& rInverseConectivityDatabase,
        const MortarOperatorType& rThisMortarOperators
        );

    /**
     * @brief This method assembles the RHS
     * @param rb The RHS of the system
     * @param VariableSize The size of the variable
     * @param rResidualMatrix The matrix containing the residual of the mapping
     * @param rSlaveGeometry The slave geometry
     * @param rInverseConectivityDatabase The inverse database that will be used to assemble the system
     */
    void AssembleRHS(
        std::vector<VectorType>& rb,
        const SizeType VariableSize,
        const Matrix& rResidualMatrix,
        const GeometryType& rSlaveGeometry,
        IntMap& rInverseConectivityDatabase
        );

    /**
     * @brief This method executes the explicit mapping (when no linear solver is available)
     */
    void ExecuteExplicitMapping();

    /**
     * @brief This method executes the mapping when a linear solver is available and a system of equations can be solved
     */
    void ExecuteImplicitMapping();

    /**
     * @brief This method computes common methods between the implicit and explicit formulation
     * @param rA The LHS of the system
     * @param rb The RHS of the system
     * @param rInverseConectivityDatabase The inverse database that will be used to assemble the system
     * @param pIndexesPairs The pointer to indexed objects
     * @param pGeometricalObject Pointer of a geometrical object
     * @param rIntegrationUtility An integration utility for mortar
     * @param rThisKineticVariables Kinematic variables (shape functions)
     * @param rThisMortarOperators The mortar operators
     * @param Iteration The current non-linear iteration
     * @tparam TClassType The class of index pairs considered
     * @tparam TImplicit If we solve with lumping or we use a linear solver
     */
    template<class TClassType, bool TImplicit = false>
    void PerformMortarOperations(
        MatrixType& rA,
        std::vector<VectorType>& rb,
        IntMap& rInverseConectivityDatabase,
        typename TClassType::Pointer pIndexesPairs,
        GeometricalObject::Pointer pGeometricalObject,
        ExactMortarIntegrationUtilityType& rIntegrationUtility,
        MortarKinematicVariablesType& rThisKineticVariables,
        MortarOperatorType& rThisMortarOperators,
        const IndexType Iteration
        )
    {
        // The root model part
        ModelPart& r_root_model_part = mOriginModelPart.GetRootModelPart();

        // Getting the auxiliary variable
        const TVarType& r_aux_variable = KratosComponents<TVarType>::Get(MortarUtilities::GetAuxiliarVariable<TVarType>());

        // Indexes of the pair to be removed
        std::vector<IndexType> indexes_to_remove, geometrical_objects_to_erase;

        // Getting discontinuous factor
        const double discontinous_interface_factor = mOptions.Is(DISCONTINOUS_INTERFACE) ? mThisParameters["discontinous_interface_factor"].GetDouble() : 1.0;

        // Declare auxiliary coordinates
        GeometryType::CoordinatesArrayType aux_coords;

        // Geometrical values of the slave entity (normal at the center)
        auto& r_slave_geometry = pGeometricalObject->GetGeometry();
        r_slave_geometry.PointLocalCoordinates(aux_coords, r_slave_geometry.Center());
        const array_1d<double, 3> slave_normal = r_slave_geometry.UnitNormal(aux_coords);

        for (auto it_pair = pIndexesPairs->begin(); it_pair != pIndexesPairs->end(); ++it_pair ) {
            const IndexType master_id = pIndexesPairs->GetId(it_pair); // MASTER
            const auto& r_master_geometry = mOptions.Is(ORIGIN_SKIN_IS_CONDITION_BASED) ? mOriginModelPart.pGetCondition(master_id)->GetGeometry() : mOriginModelPart.pGetElement(master_id)->GetGeometry();
            r_master_geometry.PointLocalCoordinates(aux_coords, r_master_geometry.Center());
            const array_1d<double, 3> master_normal = r_master_geometry.UnitNormal(aux_coords);

            const IntegrationMethod& r_integration_method = GetIntegrationMethod();

            // Reading integration points
            std::vector<array_1d<PointType,TDim>> geometrical_objects_points_slave; // These are the segmentation points; with these points it is possible to create the lines or triangles used on the mapping
            const bool is_inside = rIntegrationUtility.GetExactIntegration(r_slave_geometry, slave_normal, r_master_geometry, master_normal, geometrical_objects_points_slave);

            if (is_inside) {
                // Initialize general variables for the current master element
                rThisKineticVariables.Initialize();

                // Initialize the mortar operators
                rThisMortarOperators.Initialize();

                const BoundedMatrixType Ae = CalculateAe(r_slave_geometry, rThisKineticVariables, geometrical_objects_points_slave, r_integration_method);

                AssemblyMortarOperators( geometrical_objects_points_slave, r_slave_geometry, r_master_geometry, master_normal, rThisKineticVariables, rThisMortarOperators, r_integration_method, Ae);

                /* We compute the residual */
                const IndexType size_to_compute = MortarUtilities::SizeToCompute<TDim, TVarType>();
                Matrix residual_matrix(TNumNodes, size_to_compute);
                ComputeResidualMatrix(residual_matrix, r_slave_geometry, r_master_geometry, rThisMortarOperators);

                if (!TImplicit) {
                    MortarUtilities::AddValue<TVarType, MortarUtilitiesSettings::SaveAsNonHistoricalVariable>(r_slave_geometry, r_aux_variable, residual_matrix);
                }

                // We check if DOperator is diagonal
                if (mEchoLevel > 1) {
                    BoundedMatrixType aux_copy_D = rThisMortarOperators.DOperator;
                    LumpMatrix(aux_copy_D);
                    const BoundedMatrixType aux_diff = aux_copy_D - rThisMortarOperators.DOperator;
                    const double norm_diff = norm_frobenius(aux_diff);
                    if (norm_diff > 1.0e-4)
                        KRATOS_WARNING("D OPERATOR") << " THE MORTAR OPERATOR D IS NOT DIAGONAL" << std::endl;
                    if (mEchoLevel == 3) {
                        KRATOS_WATCH(norm_diff);
                        KRATOS_WATCH(rThisMortarOperators.DOperator);
                    }
                }

                if (Iteration == 0) { // Just assemble on the first iteration
                    if (TImplicit) {
                        /* We compute the residual and assemble */
                        const SizeType variable_size = MortarUtilities::SizeToCompute<TDim, TVarType>();
                        AssembleRHSAndLHS(rA, rb, variable_size, residual_matrix, r_slave_geometry, rInverseConectivityDatabase, rThisMortarOperators);
                    } else {
                        for (IndexType i_node = 0; i_node < TNumNodes; ++i_node) {
                            double& r_nodal_area = r_slave_geometry[i_node].GetValue(NODAL_AREA);
                            #pragma omp atomic
                            r_nodal_area += rThisMortarOperators.DOperator(i_node, i_node);
                        }
                        // In case of discontinuous interface we add contribution to near nodes
                        if (mOptions.Is(DISCONTINOUS_INTERFACE)) {
                            const double element_length = r_slave_geometry.Length();

                            // Iterating over nodes
                            for (IndexType i_node = 0; i_node < TNumNodes; ++i_node) {
                                const double nodal_area_contribution = rThisMortarOperators.DOperator(i_node, i_node);

                                // The original node coordinates
                                const auto& r_slave_node_coordinates = r_slave_geometry[i_node].Coordinates();

                                // Iterating over other paired geometrical objects
                                const auto& r_index_masp_master = mOptions.Is(ORIGIN_SKIN_IS_CONDITION_BASED) ? mOriginModelPart.pGetCondition(master_id)->GetValue(INDEX_SET) : mOriginModelPart.pGetElement(master_id)->GetValue(INDEX_SET);
                                for (auto it_master_pair = r_index_masp_master->begin(); it_master_pair != r_index_masp_master->end(); ++it_master_pair ) {
                                    const IndexType auxiliar_slave_id = r_index_masp_master->GetId(it_master_pair);
                                    if (pGeometricalObject->Id() != auxiliar_slave_id) {
                                        GeometryType& r_auxiliar_slave_geometry = mOptions.Is(DESTINATION_SKIN_IS_CONDITION_BASED) ? mDestinationModelPart.pGetCondition(auxiliar_slave_id)->GetGeometry() : mDestinationModelPart.pGetElement(auxiliar_slave_id)->GetGeometry();
                                        for (IndexType j_node = 0; j_node < TNumNodes; ++j_node) {
                                            // The auxiliary node coordinates; the weight decays with the distance to the slave node
                                            const auto& r_auxiliar_slave_node_coordinates = r_auxiliar_slave_geometry[j_node].Coordinates();
                                            const double distance = norm_2(r_auxiliar_slave_node_coordinates - r_slave_node_coordinates);
                                            const double contribution_coeff = 1.0/std::pow((1.0 + distance/(discontinous_interface_factor * element_length)), 2);
                                            double& r_nodal_area = r_auxiliar_slave_geometry[j_node].GetValue(NODAL_AREA);
                                            #pragma omp atomic
                                            r_nodal_area += contribution_coeff * nodal_area_contribution;
                                        }
                                    }
                                }
                            }
                        }
                    }
                } else if (TImplicit) {
                    const SizeType variable_size = MortarUtilities::SizeToCompute<TDim, TVarType>();
                    AssembleRHS(rb, variable_size, residual_matrix, r_slave_geometry, rInverseConectivityDatabase);
                }
            } else { // NOTE: The geometrical object considered may be too tight
                indexes_to_remove.push_back(master_id);
                const IndexType other_id = pIndexesPairs->GetOtherId(it_pair);
                if (std::is_same<TClassType, IndexMap>::value && other_id != 0) {
                    geometrical_objects_to_erase.push_back(other_id);
                }
            }
        }

        // Clear indexes
        // NOTE(review): the TO_ERASE marking loops run once per removed index and
        // re-mark the same objects each time — redundant but harmless
        for (IndexType i_to_remove = 0; i_to_remove < indexes_to_remove.size(); ++i_to_remove) {
            if (mOptions.Is(ORIGIN_SKIN_IS_CONDITION_BASED)) {
                for (auto& id : geometrical_objects_to_erase ) {
                    auto p_cond = r_root_model_part.pGetCondition(id);
                    p_cond->Set(TO_ERASE, true);
                }
            } else {
                for (auto& id : geometrical_objects_to_erase ) {
                    auto p_elem = r_root_model_part.pGetElement(id);
                    p_elem->Set(TO_ERASE, true);
                }
            }
            pIndexesPairs->RemoveId(indexes_to_remove[i_to_remove]);
        }
    }

    /**
     * @brief This method can be used to clear the unused indexes
     * @param pIndexesPairs The pointer to indexed objects
     * @param pGeometricalObject Pointer of a geometrical object
     * @param rIntegrationUtility An integration utility for mortar
     * @tparam TClassType The class of index pairs considered
     */
    template<class TClassType>
    void ClearIndexes(
        typename TClassType::Pointer pIndexesPairs,
        GeometricalObject::Pointer pGeometricalObject,
        ExactMortarIntegrationUtilityType& rIntegrationUtility
        )
    {
        // The root model part
        ModelPart& r_root_model_part = mOriginModelPart.GetRootModelPart();

        // Indexes of the pair to be removed
        std::vector<IndexType> indexes_to_remove, geometrical_objects_to_erase;

        // Declare auxiliary coordinates
        GeometryType::CoordinatesArrayType aux_coords;

        // Geometrical values
        auto& r_slave_geometry = pGeometricalObject->GetGeometry();
        r_slave_geometry.PointLocalCoordinates(aux_coords, r_slave_geometry.Center());
        const array_1d<double, 3> slave_normal = r_slave_geometry.UnitNormal(aux_coords);

        for (auto it_pair = pIndexesPairs->begin(); it_pair != pIndexesPairs->end(); ++it_pair ) {
            const IndexType master_id = pIndexesPairs->GetId(it_pair); // MASTER
            const auto& r_master_geometry = mOptions.Is(ORIGIN_SKIN_IS_CONDITION_BASED) ? mOriginModelPart.pGetCondition(master_id)->GetGeometry() : mOriginModelPart.pGetElement(master_id)->GetGeometry();
            r_master_geometry.PointLocalCoordinates(aux_coords, r_master_geometry.Center());
            const array_1d<double, 3> master_normal = r_master_geometry.UnitNormal(aux_coords);

            // Reading integration points
            std::vector<array_1d<PointType,TDim>> geometrical_objects_points_slave; // These are the segmentation points; with these points it is possible to create the lines or triangles used on the mapping
            const bool is_inside = rIntegrationUtility.GetExactIntegration(r_slave_geometry, slave_normal, r_master_geometry, master_normal, geometrical_objects_points_slave);

            if (!is_inside) {
                indexes_to_remove.push_back(master_id);
                const IndexType other_id = pIndexesPairs->GetOtherId(it_pair);
                if (std::is_same<TClassType, IndexMap>::value && other_id != 0) {
                    geometrical_objects_to_erase.push_back(other_id);
                }
            }
        }

        // Clear indexes
        for (IndexType i_to_remove = 0; i_to_remove < indexes_to_remove.size(); ++i_to_remove) {
            if (mOptions.Is(ORIGIN_SKIN_IS_CONDITION_BASED)) {
                for (auto& id : geometrical_objects_to_erase ) {
                    auto p_cond = r_root_model_part.pGetCondition(id);
                    p_cond->Set(TO_ERASE, true);
                }
            } else {
                for (auto& id : geometrical_objects_to_erase ) {
                    auto p_elem = r_root_model_part.pGetElement(id);
                    p_elem->Set(TO_ERASE, true);
                }
            }
            pIndexesPairs->RemoveId(indexes_to_remove[i_to_remove]);
        }
    }

    /**
     * @brief This method fills the database
     * @param pGeometricalObject Pointer of a geometrical object
     * @param rTreePoints The search tree
     * @param AllocationSize The allocation size of the tree
     * @param SearchFactor The search factor of the tree
     */
    template<class TEntity>
    void FillDatabase(
        typename TEntity::Pointer pGeometricalObject,
        KDTreeType& rTreePoints,
        const SizeType AllocationSize,
        const double SearchFactor
        )
    {
        // Initialize values
        PointVector points_found(AllocationSize);

        GeometryType& r_geometry = pGeometricalObject->GetGeometry();
        const Point center = r_geometry.Center();

        // Radius = farthest node from the center (squared norms compared, sqrt taken once)
        double radius = 0.0;
        for(IndexType i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) {
            const array_1d<double, 3> aux_vector = center.Coordinates() - r_geometry[i_node].Coordinates();
            const double aux_value = inner_prod(aux_vector, aux_vector);
            if(aux_value > radius) radius = aux_value;
        }

        const double search_radius = SearchFactor * std::sqrt(radius);

        const SizeType number_points_found = rTreePoints.SearchInRadius(center, search_radius, points_found.begin(), AllocationSize);

        if (number_points_found > 0) {
            // In case of missing it is created
            if (!pGeometricalObject->Has(INDEX_SET))
                pGeometricalObject->SetValue(INDEX_SET, Kratos::make_shared<IndexSet>());

            // Accessing to the index set
            IndexSet::Pointer indexes_set = pGeometricalObject->GetValue(INDEX_SET);

            for (IndexType i_point = 0; i_point < number_points_found; ++i_point ) {
                auto p_geometrical_object_master = points_found[i_point]->GetGeometricalObject();
                indexes_set->AddId(p_geometrical_object_master->Id());
            }
        }
    }

    /**
     * @brief This method creates an inverse database
     */
    void CreateInverseDatabase();

    /**
     * @brief Reset the interface database
     * @details This method resets the mapping database saved in the destination database.
     * @note Note that this needs to be done if such modelpart has changed its number of nodes or geometrical objects. This needs to be done even though the mapping instance is deleted since such information is saved in the destination nodes and geometrical objects.
     */
    void UpdateInterface();

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     */
    Parameters GetDefaultParameters();

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    /// Assignment operator.
    SimpleMortarMapperProcess& operator=(SimpleMortarMapperProcess const& rOther) = delete;

    /// Copy constructor.
    //SimpleMortarMapperProcess(SimpleMortarMapperProcess const& rOther);

    ///@}
};// class SimpleMortarMapperProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos.
#endif /* KRATOS_SIMPLE_MORTAR_MAPPER_PROCESS defined */
|
LockFreePoolWithList.h | #pragma once
#include <omp.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#include "common.h"
#include <cassert>
#include <cstring>
#include <sys/mman.h>
#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)
/**
 * Segregated-fit memory pool with one singly linked free list per
 * (thread, size-class) pair. Allocation and deallocation touch only the
 * calling thread's lists, so no locks or atomics are needed ("lock-free"
 * here means "no sharing"). Segments are carved out of large OS blocks
 * (BLOCK_SIZE bytes, or the segment size if larger) and are never
 * returned to the OS.
 *
 * NOTE(review): a pointer freed on a different thread than the one that
 * allocated it simply migrates to the freeing thread's list — presumably
 * intentional; confirm callers rely on that.
 */
template <u64 MAX_THREADS = 16, u64 MAX_SEGMENT_BITS = 32, u64 BLOCK_SIZE = (1UL << 22)>
class LockFreePoolWithList {
    // Head of the free list for each (thread, log2 size-class).
    // alignas(64) keeps the array cache-line aligned to reduce false sharing.
    alignas(64) void* __restrict nextFreePtrs[MAX_THREADS][MAX_SEGMENT_BITS];
public:
#ifdef CALC_MEM_PER_EDGE
    u64 totMem = 0; // total bytes requested from the OS (diagnostics only)
#endif
    LockFreePoolWithList(){
        memset(nextFreePtrs, 0, MAX_THREADS * MAX_SEGMENT_BITS * sizeof(void*));
    }
    /**
     * Return an uninitialized segment of exactly 2^log2size bytes.
     * Refills the calling thread's free list from a fresh OS block when empty.
     */
    void* allocLog2(u64 log2size){
#ifdef _OPENMP
        void* __restrict * nextPtr = &nextFreePtrs[omp_get_thread_num()][log2size];
#else
        void* __restrict * nextPtr = &nextFreePtrs[0][log2size];
#endif
        if(__builtin_expect(*nextPtr == nullptr, 0)){
            const u64 segSize = 1UL << log2size;
            const u64 blockSize = (segSize <= BLOCK_SIZE) ? BLOCK_SIZE : segSize;
#ifdef USE_HUGEPAGE
            void* blockStart = mmap(NULL, blockSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
                MAP_HUGE_2MB, -1, 0);
            // BUG FIX: mmap signals failure with MAP_FAILED ((void*)-1), not
            // nullptr, so the original code would happily write through a
            // failed mapping (e.g. when no 2MB huge pages are reserved).
            if(blockStart == MAP_FAILED){
                std::cerr << "LockFreePoolWithList: mmap of " << blockSize << " bytes failed" << std::endl;
                std::abort();
            }
#else
            void* blockStart = aligned_alloc(64, blockSize);
            // BUG FIX: aligned_alloc returns nullptr on failure; the original
            // code dereferenced the result unchecked.
            if(blockStart == nullptr){
                std::cerr << "LockFreePoolWithList: aligned_alloc of " << blockSize << " bytes failed" << std::endl;
                std::abort();
            }
#endif
#ifdef CALC_MEM_PER_EDGE
            #pragma omp atomic
            totMem += blockSize;
#endif
            *nextPtr = blockStart;
            // Thread the new block's segments into a free list: the first
            // word of each free segment stores the pointer to the next one.
            const u64 iter = blockSize >> log2size;
            for(u64 i = 0; i < (iter - 1); i++){
                void* nextBlock = (u8*)blockStart + segSize;
                *(void**)blockStart = nextBlock;
                blockStart = nextBlock;
            }
            *(void**)blockStart = nullptr;
        }
        // Pop the head of the free list.
        void* ret = *nextPtr;
        *nextPtr = *(void**)ret;
        return ret;
    }
    // size must already be a power of two (getPow2Log2 assumes this).
    void* allocPow2(u64 size){
        return allocLog2(getPow2Log2(size));
    }
    // Arbitrary size: rounds up to the next power of two.
    void* allocate(u64 size){
        return allocLog2(getNextPow2Log2(size));
    }
    /** Push ptr (a 2^log2size-byte segment) back on the caller's free list. */
    void freeLog2(void* ptr, u64 log2size){
#ifdef _OPENMP
        void* __restrict * nextPtr = &nextFreePtrs[omp_get_thread_num()][log2size];
#else
        void* __restrict * nextPtr = &nextFreePtrs[0][log2size];
#endif
        *(void**)ptr = *nextPtr;
        *nextPtr = ptr;
    }
    void freePow2(void* __restrict ptr, u64 size){
        freeLog2(ptr, getPow2Log2(size));
    }
    void deallocate(void* __restrict ptr, u64 size){
        freeLog2(ptr, getNextPow2Log2(size));
    }
};
|
hello_openMP_v1.c | #include<stdio.h>
#include<omp.h>
/* Print "Hello World!" once from each of four OpenMP threads. */
int main()
{
    /* Fix the team size before entering the parallel region. */
    omp_set_num_threads(4);

#pragma omp parallel
    {
        /* puts appends the newline, matching printf("Hello World!\n"). */
        puts("Hello World!");
    }

    return 0;
}
|
GenerateICs.c | #include <math.h>
#include <unistd.h>
#include <stdio.h>
#include <stdbool.h>
#include <ctype.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
//#include <pthread.h>
#include <omp.h>
#include <complex.h>
#include <fftw3.h>
#include <gsl/gsl_interp.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include "21cmFAST.h"
#include "exceptions.h"
#include "logger.h"
#include "Constants.h"
#include "Globals.h"
#include "indexing.c"
#include "UsefulFunctions.c"
#include "ps.c"
#include "dft.c"
#include "PerturbField.c"
#include "bubble_helper_progs.c"
#include "elec_interp.c"
#include "heating_helper_progs.c"
#include "recombinations.c"
#include "IonisationBox.c"
#include "SpinTemperatureBox.c"
#include "BrightnessTemperatureBox.c"
#include "FindHaloes.c"
#include "PerturbHaloField.c"
/**
 * Enforce the Hermitian symmetry F(-k) = conj(F(k)) that the k-space
 * representation of a real-valued field must satisfy, in-place on the
 * half-cube HIRES_box (indices 0..DIM-1 in x,y and 0..MIDDLE in z).
 * The DC mode is zeroed and the eight self-conjugate "corner" modes are
 * forced to be purely real; the remaining k_z in {0, MIDDLE} planes are
 * made consistent by copying conjugates across the i and j axes.
 * Note: cosmo_params is accepted but not read by this function.
 */
void adj_complex_conj(fftwf_complex *HIRES_box, struct UserParams *user_params, struct CosmoParams *cosmo_params){
    /***** Adjust the complex conjugate relations for a real array *****/
    int i, j, k;
    // corners: self-conjugate modes must equal their own conjugate,
    // i.e. they must be real (the DC mode is set to zero outright)
    HIRES_box[C_INDEX(0,0,0)] = 0;
    HIRES_box[C_INDEX(0,0,MIDDLE)] = crealf(HIRES_box[C_INDEX(0,0,MIDDLE)]);
    HIRES_box[C_INDEX(0,MIDDLE,0)] = crealf(HIRES_box[C_INDEX(0,MIDDLE,0)]);
    HIRES_box[C_INDEX(0,MIDDLE,MIDDLE)] = crealf(HIRES_box[C_INDEX(0,MIDDLE,MIDDLE)]);
    HIRES_box[C_INDEX(MIDDLE,0,0)] = crealf(HIRES_box[C_INDEX(MIDDLE,0,0)]);
    HIRES_box[C_INDEX(MIDDLE,0,MIDDLE)] = crealf(HIRES_box[C_INDEX(MIDDLE,0,MIDDLE)]);
    HIRES_box[C_INDEX(MIDDLE,MIDDLE,0)] = crealf(HIRES_box[C_INDEX(MIDDLE,MIDDLE,0)]);
    HIRES_box[C_INDEX(MIDDLE,MIDDLE,MIDDLE)] = crealf(HIRES_box[C_INDEX(MIDDLE,MIDDLE,MIDDLE)]);
    // do entire i except corners: i runs over 1..MIDDLE-1 and its mirror
    // DIM-i lives in the other half, so the writes below never collide
    // with the reads across OpenMP iterations
    #pragma omp parallel shared(HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
    {
        #pragma omp for
        for (i=1; i<MIDDLE; i++){
            // just j corners (j and k restricted to the planes 0 and MIDDLE)
            for (j=0; j<=MIDDLE; j+=MIDDLE){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,j,k)]);
                }
            }
            // all of j: pair (i,j) with its mirror (DIM-i, DIM-j) on the
            // self-conjugate k planes
            for (j=1; j<MIDDLE; j++){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,(user_params->DIM)-j,k)]);
                    HIRES_box[C_INDEX(i,(user_params->DIM)-j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,j,k)]);
                }
            }
        } // end loop over i
    }
    // now the i corners (i fixed to the planes 0 and MIDDLE)
    #pragma omp parallel shared(HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
    {
        #pragma omp for
        for (i=0; i<=MIDDLE; i+=MIDDLE){
            for (j=1; j<MIDDLE; j++){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX(i,(user_params->DIM)-j,k)]);
                }
            }
        } // end loop over remaining j
    }
}
// Re-write of init.c for original 21cmFAST
/**
 * Generate the initial conditions for a 21cmFAST run.
 *
 * Draws a Gaussian random density field in k-space at high resolution
 * (user_params->DIM^3), FFTs it to real space, and fills `boxes` with:
 *   - the high-res density field, plus a filtered low-res copy when
 *     perturbing on the low-res grid;
 *   - Zel'dovich-approximation velocity fields (hi- or low-res depending
 *     on user_params->PERTURB_ON_HIGH_RES);
 *   - optionally, relative-velocity (vcb) magnitudes and 2LPT velocity
 *     corrections.
 *
 * @param random_seed  seed for the per-thread GSL RNGs
 * @param user_params  run configuration (DIM, HII_DIM, N_THREADS, flags)
 * @param cosmo_params cosmological parameters, broadcast to helper modules
 * @param boxes        output structure; its arrays must be pre-allocated
 * @return 0 on success, otherwise the error status caught from Throw()
 */
int ComputeInitialConditions(
    unsigned long long random_seed, struct UserParams *user_params,
    struct CosmoParams *cosmo_params, struct InitialConditions *boxes
){
    // Generates the initial conditions: gaussian random density field (user_params->DIM^3) as well as the equal or lower resolution velocity fields, and smoothed density field (user_params->HII_DIM^3).
    //
    // Author: Andrei Mesinger
    // Date: 9/29/06
    int status;
    Try{ // This Try wraps the entire function so we don't indent.
        // Makes the parameter structs visible to a variety of functions/macros
        // Do each time to avoid Python garbage collection issues
        Broadcast_struct_global_PS(user_params,cosmo_params);
        Broadcast_struct_global_UF(user_params,cosmo_params);
        unsigned long long ct;                      // (unused here)
        int n_x, n_y, n_z, i, j, k, ii, thread_num, dimension;
        float k_x, k_y, k_z, k_mag, p, a, b, k_sq;
        double pixel_deltax;                        // (unused here)
        float p_vcb, vcb_i;
        float f_pixel_factor;
        // One independent RNG per OpenMP thread
        gsl_rng * r[user_params->N_THREADS];
        gsl_rng * rseed = gsl_rng_alloc(gsl_rng_mt19937); // An RNG for generating seeds for multithreading
        gsl_rng_set(rseed, random_seed);
        omp_set_num_threads(user_params->N_THREADS);
        // dimension selects which resolution the velocity boxes are written at
        // NOTE(review): dimension is left uninitialized for any value of
        // PERTURB_ON_HIGH_RES other than 0/1 — presumably the flag is
        // strictly boolean; confirm upstream validation.
        switch(user_params->PERTURB_ON_HIGH_RES) {
            case 0:
                dimension = user_params->HII_DIM;
                break;
            case 1:
                dimension = user_params->DIM;
                break;
        }
        // ************  INITIALIZATION ********************** //
        unsigned int seeds[user_params->N_THREADS];
        // For multithreading, seeds for the RNGs are generated from an initial RNG (based on the input random_seed) and then shuffled (Author: Fred Davies)
        int num_int = INT_MAX/16;
        unsigned int *many_ints = (unsigned int *)malloc((size_t)(num_int*sizeof(unsigned int))); // Some large number of possible integers
        for (i=0; i<num_int; i++) {
            many_ints[i] = i;
        }
        gsl_ran_choose(rseed, seeds, user_params->N_THREADS, many_ints, num_int, sizeof(unsigned int)); // Populate the seeds array from the large list of integers
        gsl_ran_shuffle(rseed, seeds, user_params->N_THREADS, sizeof(unsigned int)); // Shuffle the randomly selected integers
        int checker;
        checker = 0;
        // seed the random number generators, cycling through five different
        // GSL generator types so neighbouring threads use distinct algorithms
        for (thread_num = 0; thread_num < user_params->N_THREADS; thread_num++){
            switch (checker){
                case 0:
                    r[thread_num] = gsl_rng_alloc(gsl_rng_mt19937);
                    gsl_rng_set(r[thread_num], seeds[thread_num]);
                    break;
                case 1:
                    r[thread_num] = gsl_rng_alloc(gsl_rng_gfsr4);
                    gsl_rng_set(r[thread_num], seeds[thread_num]);
                    break;
                case 2:
                    r[thread_num] = gsl_rng_alloc(gsl_rng_cmrg);
                    gsl_rng_set(r[thread_num], seeds[thread_num]);
                    break;
                case 3:
                    r[thread_num] = gsl_rng_alloc(gsl_rng_mrg);
                    gsl_rng_set(r[thread_num], seeds[thread_num]);
                    break;
                case 4:
                    r[thread_num] = gsl_rng_alloc(gsl_rng_taus2);
                    gsl_rng_set(r[thread_num], seeds[thread_num]);
                    break;
            } // end switch
            checker += 1;
            if(checker==5) {
                checker = 0;
            }
        }
        free(many_ints);
        // allocate array for the k-space and real-space boxes
        fftwf_complex *HIRES_box = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
        fftwf_complex *HIRES_box_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
        // allocate array for the k-space and real-space boxes for vcb
        fftwf_complex *HIRES_box_vcb_saved;
        // HIRES_box_vcb_saved may be needed if FFTW_Wisdom doesn't exist -- currently unused
        // but I am not going to allocate it until I am certain I needed it.
        // find factor of HII pixel size / deltax pixel size
        f_pixel_factor = user_params->DIM/(float)user_params->HII_DIM;
        // ************  END INITIALIZATION ****************** //
        LOG_DEBUG("Finished initialization.");
        // ************ CREATE K-SPACE GAUSSIAN RANDOM FIELD *********** //
        init_ps();
        // Draw each independent k-mode (z-half-cube) from a Gaussian with
        // variance set by the power spectrum: delta_k ~ sqrt(V*P(k)/2)*(a+bi)
        #pragma omp parallel shared(HIRES_box,r) \
        private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,p,a,b,p_vcb) num_threads(user_params->N_THREADS)
        {
            #pragma omp for
            for (n_x=0; n_x<user_params->DIM; n_x++){
                // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n
                if (n_x>MIDDLE)
                    k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                else
                    k_x = n_x * DELTA_K;
                for (n_y=0; n_y<user_params->DIM; n_y++){
                    // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n
                    if (n_y>MIDDLE)
                        k_y =(n_y-user_params->DIM) * DELTA_K;
                    else
                        k_y = n_y * DELTA_K;
                    // since physical space field is real, only half contains independent modes
                    for (n_z=0; n_z<=MIDDLE; n_z++){
                        // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n
                        k_z = n_z * DELTA_K;
                        // now get the power spectrum; remember, only the magnitude of k counts (due to isotropy)
                        // this could be used to speed-up later maybe
                        k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z);
                        p = power_in_k(k_mag);
                        // ok, now we can draw the values of the real and imaginary part
                        // of our k entry from a Gaussian distribution
                        // (NO_RNG fixes a=1, b=-1 for reproducible debugging)
                        if(user_params->NO_RNG) {
                            a = 1.0;
                            b = -1.0;
                        }
                        else {
                            a = gsl_ran_ugaussian(r[omp_get_thread_num()]);
                            b = gsl_ran_ugaussian(r[omp_get_thread_num()]);
                        }
                        HIRES_box[C_INDEX(n_x, n_y, n_z)] = sqrt(VOLUME*p/2.0) * (a + b*I);
                    }
                }
            }
        }
        LOG_DEBUG("Drawn random fields.");
        // ***** Adjust the complex conjugate relations for a real array ***** //
        adj_complex_conj(HIRES_box,user_params,cosmo_params);
        // keep an unfiltered k-space copy: HIRES_box is reused repeatedly below
        memcpy(HIRES_box_saved, HIRES_box, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
        // FFT back to real space
        int stat = dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
        if(stat>0) Throw(stat);
        LOG_DEBUG("FFT'd hires boxes.");
        // copy the real-space density into the output box, normalizing by VOLUME
        #pragma omp parallel shared(boxes,HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
        {
            #pragma omp for
            for (i=0; i<user_params->DIM; i++){
                for (j=0; j<user_params->DIM; j++){
                    for (k=0; k<user_params->DIM; k++){
                        *((float *)boxes->hires_density + R_INDEX(i,j,k)) = *((float *)HIRES_box + R_FFT_INDEX(i,j,k))/VOLUME;
                    }
                }
            }
        }
        // *** If required, let's also create a lower-resolution version of the density field *** //
        memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
        // Only filter if we are perturbing on the low-resolution grid
        if(!user_params->PERTURB_ON_HIGH_RES) {
            if (user_params->DIM != user_params->HII_DIM) {
                filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
            }
            // FFT back to real space
            dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
            // Renormalise the FFT'd box (sample the high-res box if we are perturbing on the low-res grid)
            #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor) private(i,j,k) num_threads(user_params->N_THREADS)
            {
                #pragma omp for
                for (i=0; i<user_params->HII_DIM; i++){
                    for (j=0; j<user_params->HII_DIM; j++){
                        for (k=0; k<user_params->HII_DIM; k++){
                            // nearest-cell sampling: +0.5 rounds to the closest hi-res pixel
                            boxes->lowres_density[HII_R_INDEX(i,j,k)] =
                            *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                               (unsigned long long)(j*f_pixel_factor+0.5),
                                                               (unsigned long long)(k*f_pixel_factor+0.5)))/VOLUME;
                        }
                    }
                }
            }
        }
        // ******* Relative Velocity part ******* //
        if(user_params->USE_RELATIVE_VELOCITIES){
            //JBM: We use the memory allocated to HIRES_box as it's free.
            // Build |v_cb| by accumulating the square of each Cartesian
            // component ii into lowres_vcb, then taking sqrt at the end.
            // NOTE(review): assumes boxes->lowres_vcb starts zeroed — confirm
            // the allocator zero-initializes the output arrays.
            for(ii=0;ii<3;ii++) {
                memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
                #pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,p,p_vcb) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    for (n_x=0; n_x<user_params->DIM; n_x++){
                        if (n_x>MIDDLE)
                            k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                        else
                            k_x = n_x * DELTA_K;
                        for (n_y=0; n_y<user_params->DIM; n_y++){
                            if (n_y>MIDDLE)
                                k_y =(n_y-user_params->DIM) * DELTA_K;
                            else
                                k_y = n_y * DELTA_K;
                            for (n_z=0; n_z<=MIDDLE; n_z++){
                                k_z = n_z * DELTA_K;
                                k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z);
                                p = power_in_k(k_mag);
                                p_vcb = power_in_vcb(k_mag);
                                // now set the velocities
                                if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                    HIRES_box[0] = 0;
                                }
                                else{
                                    // rescale density mode to velocity mode:
                                    // multiply by i*k_hat_ii*sqrt(P_vcb/P)*c
                                    if(ii==0) {
                                        HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_x/k_mag * sqrt(p_vcb/p) * C_KMS;
                                    }
                                    if(ii==1) {
                                        HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_y/k_mag * sqrt(p_vcb/p) * C_KMS;
                                    }
                                    if(ii==2) {
                                        HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_z/k_mag * sqrt(p_vcb/p) * C_KMS;
                                    }
                                }
                            }
                        }
                    }
                }
                //we only care about the lowres vcb box, so we filter it directly.
                if (user_params->DIM != user_params->HII_DIM) {
                    filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
                }
                //fft each velocity component back to real space
                dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
                #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii) private(i,j,k,vcb_i) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    for (i=0; i<user_params->HII_DIM; i++){
                        for (j=0; j<user_params->HII_DIM; j++){
                            for (k=0; k<user_params->HII_DIM; k++){
                                vcb_i = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                                boxes->lowres_vcb[HII_R_INDEX(i,j,k)] += vcb_i*vcb_i;
                            }
                        }
                    }
                }
            }
            //now we take the sqrt of that and normalize the FFT
            for (i=0; i<user_params->HII_DIM; i++){
                for (j=0; j<user_params->HII_DIM; j++){
                    for (k=0; k<user_params->HII_DIM; k++){
                        boxes->lowres_vcb[HII_R_INDEX(i,j,k)] = sqrt(boxes->lowres_vcb[HII_R_INDEX(i,j,k)])/VOLUME;
                    }
                }
            }
        }
        LOG_DEBUG("Completed Relative velocities.");
        // ******* End of Relative Velocity part ******* //
        // Now look at the velocities (Zel'dovich approximation): in k-space
        // v_ii(k) = i*k_ii/k^2 * delta(k); loop once per Cartesian component
        for(ii=0;ii<3;ii++) {
            memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
            // Now let's set the velocity field/dD/dt (in comoving Mpc)
            #pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS)
            {
                #pragma omp for
                for (n_x=0; n_x<user_params->DIM; n_x++){
                    if (n_x>MIDDLE)
                        k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                    else
                        k_x = n_x * DELTA_K;
                    for (n_y=0; n_y<user_params->DIM; n_y++){
                        if (n_y>MIDDLE)
                            k_y =(n_y-user_params->DIM) * DELTA_K;
                        else
                            k_y = n_y * DELTA_K;
                        for (n_z=0; n_z<=MIDDLE; n_z++){
                            k_z = n_z * DELTA_K;
                            k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
                            // now set the velocities
                            if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                HIRES_box[0] = 0;
                            }
                            else{
                                // the 1/VOLUME factor pre-normalizes the inverse FFT
                                if(ii==0) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_x*I/k_sq/VOLUME;
                                }
                                if(ii==1) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_y*I/k_sq/VOLUME;
                                }
                                if(ii==2) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_z*I/k_sq/VOLUME;
                                }
                            }
                        }
                    }
                }
            }
            // Filter only if we require perturbing on the low-res grid
            if(!user_params->PERTURB_ON_HIGH_RES) {
                if (user_params->DIM != user_params->HII_DIM) {
                    filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
                }
            }
            dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
            // now sample to lower res
            // now sample the filtered box
            #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
            {
                #pragma omp for
                for (i=0; i<dimension; i++){
                    for (j=0; j<dimension; j++){
                        for (k=0; k<dimension; k++){
                            if(user_params->PERTURB_ON_HIGH_RES) {
                                if(ii==0) {
                                    boxes->hires_vx[R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                       (unsigned long long)(j),
                                                                       (unsigned long long)(k)));
                                }
                                if(ii==1) {
                                    boxes->hires_vy[R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                       (unsigned long long)(j),
                                                                       (unsigned long long)(k)));
                                }
                                if(ii==2) {
                                    boxes->hires_vz[R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                       (unsigned long long)(j),
                                                                       (unsigned long long)(k)));
                                }
                            }
                            else {
                                if(ii==0) {
                                    boxes->lowres_vx[HII_R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                       (unsigned long long)(j*f_pixel_factor+0.5),
                                                                       (unsigned long long)(k*f_pixel_factor+0.5)));
                                }
                                if(ii==1) {
                                    boxes->lowres_vy[HII_R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                       (unsigned long long)(j*f_pixel_factor+0.5),
                                                                       (unsigned long long)(k*f_pixel_factor+0.5)));
                                }
                                if(ii==2) {
                                    boxes->lowres_vz[HII_R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                       (unsigned long long)(j*f_pixel_factor+0.5),
                                                                       (unsigned long long)(k*f_pixel_factor+0.5)));
                                }
                            }
                        }
                    }
                }
            }
        }
        LOG_DEBUG("Done Inverse FT.");
        // *  *************************************************** * //
        // *              BEGIN 2LPT PART                         * //
        // *  *************************************************** * //
        // Generation of the second order Lagrangian perturbation theory (2LPT) corrections to the ZA
        // reference: Scoccimarro R., 1998, MNRAS, 299, 1097-1118 Appendix D
        // Parameter set in ANAL_PARAMS.H
        if(user_params->USE_2LPT){
            // use six supplementary boxes to store the gradients of phi_1 (eq. D13b)
            // Allocating the boxes
            #define PHI_INDEX(i, j) ((int) ((i) - (j)) + 3*((j)) - ((int)(j))/2 )
            // ij -> INDEX
            // 00 -> 0
            // 11 -> 3
            // 22 -> 5
            // 10 -> 1
            // 20 -> 2
            // 21 -> 4
            fftwf_complex *phi_1 = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
            // First generate the ii,jj phi_1 boxes
            int phi_component;
            float component_ii,component_jj,component_ij;
            // Indexing for the various phy components
            int phi_directions[3][2] = {{0,1},{0,2},{1,2}};
            // zero HIRES_box: it will accumulate the 2LPT source term
            // sum_{i<j} (phi_ii*phi_jj - phi_ij^2)
            #pragma omp parallel shared(HIRES_box,phi_1) private(i,j,k) num_threads(user_params->N_THREADS)
            {
                #pragma omp for
                for (i=0; i<user_params->DIM; i++){
                    for (j=0; j<user_params->DIM; j++){
                        for (k=0; k<user_params->DIM; k++){
                            *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                               (unsigned long long)(j),
                                                               (unsigned long long)(k)) ) = 0.;
                        }
                    }
                }
            }
            // First iterate over the i = j components to phi
            // We'll also save these temporarily to the hires_vi_2LPT boxes which will get
            // overwritten later with the correct 2LPT velocities
            for(phi_component=0;phi_component<3;phi_component++) {
                i = j = phi_component;
                // generate the phi_1 boxes in Fourier transform
                #pragma omp parallel shared(HIRES_box,phi_1,i,j) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq,k) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    for (n_x=0; n_x<user_params->DIM; n_x++){
                        if (n_x>MIDDLE)
                            k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                        else
                            k_x = n_x * DELTA_K;
                        for (n_y=0; n_y<user_params->DIM; n_y++){
                            if (n_y>MIDDLE)
                                k_y =(n_y-user_params->DIM) * DELTA_K;
                            else
                                k_y = n_y * DELTA_K;
                            for (n_z=0; n_z<=MIDDLE; n_z++){
                                k_z = n_z * DELTA_K;
                                k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
                                // local float array k[] deliberately shadows the int loop index k
                                float k[] = {k_x, k_y, k_z};
                                // now set the velocities
                                if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                    phi_1[0] = 0;
                                }
                                else{
                                    phi_1[C_INDEX(n_x,n_y,n_z)] = -k[i]*k[j]*HIRES_box_saved[C_INDEX(n_x, n_y, n_z)]/k_sq/VOLUME;
                                    // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT
                                }
                            }
                        }
                    }
                }
                dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, phi_1);
                // Temporarily store in the allocated hires_vi_2LPT boxes
                #pragma omp parallel shared(boxes,phi_1,phi_component) private(i,j,k) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    for (i=0; i<user_params->DIM; i++){
                        for (j=0; j<user_params->DIM; j++){
                            for (k=0; k<user_params->DIM; k++){
                                if(phi_component==0) {
                                    boxes->hires_vx_2LPT[R_INDEX(i,j,k)] = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i),
                                                                                                          (unsigned long long)(j),
                                                                                                          (unsigned long long)(k)));
                                }
                                if(phi_component==1) {
                                    boxes->hires_vy_2LPT[R_INDEX(i,j,k)] = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i),
                                                                                                          (unsigned long long)(j),
                                                                                                          (unsigned long long)(k)));
                                }
                                if(phi_component==2) {
                                    boxes->hires_vz_2LPT[R_INDEX(i,j,k)] = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i),
                                                                                                          (unsigned long long)(j),
                                                                                                          (unsigned long long)(k)));
                                }
                            }
                        }
                    }
                }
            }
            for(phi_component=0;phi_component<3;phi_component++) {
                // Now calculate the cross components and start evaluating the 2LPT field
                i = phi_directions[phi_component][0];
                j = phi_directions[phi_component][1];
                // generate the phi_1 boxes in Fourier transform
                #pragma omp parallel shared(HIRES_box,phi_1) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq,k) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    for (n_x=0; n_x<user_params->DIM; n_x++){
                        if (n_x>MIDDLE)
                            k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                        else
                            k_x = n_x * DELTA_K;
                        for (n_y=0; n_y<user_params->DIM; n_y++){
                            if (n_y>MIDDLE)
                                k_y =(n_y-user_params->DIM) * DELTA_K;
                            else
                                k_y = n_y * DELTA_K;
                            for (n_z=0; n_z<=MIDDLE; n_z++){
                                k_z = n_z * DELTA_K;
                                k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
                                float k[] = {k_x, k_y, k_z};
                                // now set the velocities
                                if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                    phi_1[0] = 0;
                                }
                                else{
                                    phi_1[C_INDEX(n_x,n_y,n_z)] = -k[i]*k[j]*HIRES_box_saved[C_INDEX(n_x, n_y, n_z)]/k_sq/VOLUME;
                                    // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT
                                }
                            }
                        }
                    }
                }
                dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, phi_1);
                // Then we will have the laplacian of phi_2 (eq. D13b)
                // After that we have to return in Fourier space and generate the Fourier transform of phi_2
                #pragma omp parallel shared(HIRES_box,phi_1,phi_component) private(i,j,k,component_ii,component_jj,component_ij) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    for (i=0; i<user_params->DIM; i++){
                        for (j=0; j<user_params->DIM; j++){
                            for (k=0; k<user_params->DIM; k++){
                                // Note, I have temporarily stored the components into other arrays to minimise memory usage
                                // phi - {0, 1, 2} -> {hires_vx_2LPT, hires_vy_2LPT, hires_vz_2LPT}
                                // This may be opaque to the user, but this shouldn't need modification
                                if(phi_component==0) {
                                    component_ii = boxes->hires_vx_2LPT[R_INDEX(i,j,k)];
                                    component_jj = boxes->hires_vy_2LPT[R_INDEX(i,j,k)];
                                    component_ij = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i),
                                                                                  (unsigned long long)(j),
                                                                                  (unsigned long long)(k)));
                                }
                                if(phi_component==1) {
                                    component_ii = boxes->hires_vx_2LPT[R_INDEX(i,j,k)];
                                    component_jj = boxes->hires_vz_2LPT[R_INDEX(i,j,k)];
                                    component_ij = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i),
                                                                                  (unsigned long long)(j),
                                                                                  (unsigned long long)(k)));
                                }
                                if(phi_component==2) {
                                    component_ii = boxes->hires_vy_2LPT[R_INDEX(i,j,k)];
                                    component_jj = boxes->hires_vz_2LPT[R_INDEX(i,j,k)];
                                    component_ij = *((float *)phi_1 + R_FFT_INDEX((unsigned long long)(i),
                                                                                  (unsigned long long)(j),
                                                                                  (unsigned long long)(k)));
                                }
                                // Kept in this form to maintain similar (possible) rounding errors
                                *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                   (unsigned long long)(j),
                                                                   (unsigned long long)(k)) ) += \
                                                        ( component_ii * component_jj );
                                *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                   (unsigned long long)(j),
                                                                   (unsigned long long)(k)) ) -= \
                                                        ( component_ij * component_ij );
                            }
                        }
                    }
                }
            }
            // normalize before transforming back to k-space
            #pragma omp parallel shared(HIRES_box,phi_1) private(i,j,k) num_threads(user_params->N_THREADS)
            {
                #pragma omp for
                for (i=0; i<user_params->DIM; i++){
                    for (j=0; j<user_params->DIM; j++){
                        for (k=0; k<user_params->DIM; k++){
                            *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),(unsigned long long)(j),(unsigned long long)(k)) ) /= TOT_NUM_PIXELS;
                        }
                    }
                }
            }
            // Perform FFTs
            dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
            memcpy(HIRES_box_saved, HIRES_box, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
            // Now we can store the content of box in a back-up array
            // Then we can generate the gradients of phi_2 (eq. D13b and D9)
            // ***** Store back-up k-box RHS eq. D13b ***** //
            // For each component, we generate the velocity field (same as the ZA part)
            // Now let's set the velocity field/dD/dt (in comoving Mpc)
            // read in the box
            // TODO correct free of phi_1
            for(ii=0;ii<3;ii++) {
                if(ii>0) {
                    memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
                }
                #pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    // set velocities/dD/dt
                    for (n_x=0; n_x<user_params->DIM; n_x++){
                        if (n_x>MIDDLE)
                            k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                        else
                            k_x = n_x * DELTA_K;
                        for (n_y=0; n_y<user_params->DIM; n_y++){
                            if (n_y>MIDDLE)
                                k_y =(n_y-user_params->DIM) * DELTA_K;
                            else
                                k_y = n_y * DELTA_K;
                            for (n_z=0; n_z<=MIDDLE; n_z++){
                                k_z = n_z * DELTA_K;
                                k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
                                // now set the velocities
                                if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                    HIRES_box[0] = 0;
                                }
                                else{
                                    if(ii==0) {
                                        HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_x*I/k_sq;
                                    }
                                    if(ii==1) {
                                        HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_y*I/k_sq;
                                    }
                                    if(ii==2) {
                                        HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_z*I/k_sq;
                                    }
                                }
                            }
                            // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT
                        }
                    }
                }
                // Filter only if we require perturbing on the low-res grid
                if(!user_params->PERTURB_ON_HIGH_RES) {
                    if (user_params->DIM != user_params->HII_DIM) {
                        filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
                    }
                }
                dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
                // now sample to lower res
                // now sample the filtered box
                #pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
                {
                    #pragma omp for
                    for (i=0; i<dimension; i++){
                        for (j=0; j<dimension; j++){
                            for (k=0; k<dimension; k++){
                                if(user_params->PERTURB_ON_HIGH_RES) {
                                    if(ii==0) {
                                        boxes->hires_vx_2LPT[R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                           (unsigned long long)(j),
                                                                           (unsigned long long)(k)));
                                    }
                                    if(ii==1) {
                                        boxes->hires_vy_2LPT[R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                           (unsigned long long)(j),
                                                                           (unsigned long long)(k)));
                                    }
                                    if(ii==2) {
                                        boxes->hires_vz_2LPT[R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                           (unsigned long long)(j),
                                                                           (unsigned long long)(k)));
                                    }
                                }
                                else {
                                    if(ii==0) {
                                        boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                                    }
                                    if(ii==1) {
                                        boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                                    }
                                    if(ii==2) {
                                        boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                                    }
                                }
                            }
                        }
                    }
                }
            }
            // deallocate the supplementary boxes
            fftwf_free(phi_1);
        }
        LOG_DEBUG("Done 2LPT.");
        // *  *********************************************** * //
        // *               END 2LPT PART                      * //
        // *  *********************************************** * //
        fftwf_cleanup_threads();
        fftwf_cleanup();
        fftwf_forget_wisdom();
        // deallocate
        fftwf_free(HIRES_box);
        fftwf_free(HIRES_box_saved);
        free_ps();
        for (i=0; i<user_params->N_THREADS; i++) {
            gsl_rng_free (r[i]);
        }
        gsl_rng_free(rseed);
        LOG_DEBUG("Cleaned Up.");
    } // End of Try{}
    Catch(status){
        return(status);
    }
    return(0);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a normalized struct timeval.
 * Returns 1 when the difference is negative, 0 otherwise.
 * Note: *y is normalized in place as a scratch value (same as the
 * classic glibc-manual implementation this mirrors). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds (> 1s) from the difference into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 24;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_binop__times_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int16)
// A*D function (colscale): GB (_AxD__times_int16)
// D*A function (rowscale): GB (_DxB__times_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int16)
// C=scalar+B GB (_bind1st__times_int16)
// C=scalar+B' GB (_bind1st_tran__times_int16)
// C=A+scalar GB (_bind2nd__times_int16)
// C=A'+scalar GB (_bind2nd_tran__times_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT16 || GxB_NO_TIMES_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// The loop body comes from the shared template, specialized by the GB_*
// macros defined at the top of this file (GB_CTYPE, GB_BINOP, ...).
// NOTE(review): unlike the other kernels here, this one returns void and has
// no GB_DISABLE guard — presumably the caller checks GB_DISABLE before
// dispatching; verify against the dispatch code.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// Operator compiled out (see GB_DISABLE above): report GrB_NO_VALUE so the
// caller falls back to the generic kernel.
return (GrB_NO_VALUE) ;
#else
// Numeric work is done by the shared template, specialized via the GB_*
// macros defined at the top of this file.
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// Operator compiled out: let the generic fallback handle it.
return (GrB_NO_VALUE) ;
#else
{
// B is sliced into B_ntasks tasks (B_ek_slicing) for the parallel template.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns. Kept as-is
// because this file is auto-generated (see header) and must not be edited.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Typed view of C's value array for the template below.
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Typed view of C's value array for the template below.
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspace declared here is allocated (if needed) inside the template and
// released by the GB_FREE_WORK below — the declare/free pair must match.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The meta-template selects the concrete emult variant from C_sparsity,
// ewise_method, and the mask arguments.
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for TIMES (commutative; see macro above), so only the
// #else branch below is compiled and the flipxy argument is ignored here.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// M is sliced into M_ntasks tasks (M_ek_slicing) for the parallel template.
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Template handles all three mask cases (none, M, !M) for bitmap C.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply the TIMES operator with the scalar bound as 1st input
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Typed views of the untyped input/output buffers.
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // Only entries present in B's bitmap are computed.
        if (GBB (Bb, k))
        {
            int16_t bval = GBX (Bx, k, false) ;
            Cx [k] = (x * bval) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply the TIMES operator with the scalar bound as 2nd input
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Typed views of the untyped input/output buffers.
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Only entries present in A's bitmap are computed.
        if (GBB (Ab, k))
        {
            int16_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval * y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for code after this function. The redefinition sits
// after the return, but preprocessing is textual so it still takes effect.
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Transpose loop comes from the shared template; GB_CAST_OP above applies
// the operator with y bound as the 2nd input.
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
avx2-vec-mask-bit-not.c | /* { dg-do compile } */
/* { dg-require-effective-target avx2 } */
/* { dg-options "-mavx2 -O3 -fopenmp-simd -fdump-tree-vect-details" } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
#define N 1024
int a[N], b[N], c[N], d[N], e[N];
void
test (void)
{
/* Conditionally zero e[i] under a negated compound comparison. The negation
of the combined masks presumably must be recognized as a vector mask bit-not
for the loop to vectorize (checked by the scan-tree-dump directive above —
do not restructure the loop). */
int i;
#pragma omp simd
for (i = 0; i < N; i++)
if (!(a[i] > b[i] && c[i] < d[i]))
e[i] = 0;
}
|
utilities.h | /**
* Copyright 2018, University of Freiburg
* Optophysiology Lab.
* Thomas Leyh <thomas.leyh@mailbox.org>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <vector>
#include <cstddef>
#include <random>
#include <stdexcept>
#include <iterator>
#include <climits>
#include <chrono>
#include <memory>
#include "Histogram2d.h"
/**
* Calculates the histogram indices of a certain data container.
* @param bins Number of bins for the imaginary histogram.
* @param min Minimum value in data container.
* @param max Maximum value in data container.
* @param begin Iterator to the beginning of the data.
* @param end Iterator to the end of the data.
* @return A vector with the same size as the data holding the index positions.
 * If there is a value outside the [min,max] range INT_MAX is inserted
 * at this position.
*/
template<typename T, typename Iterator>
std::vector<int> calculate_indices_1d(
const int bins,
const T min, const T max,
const Iterator begin, const Iterator end);
/**
* Small struct for simply holding two index values.
*/
struct index_pair
{
int first;
int second;
};
/**
* Calculate indices of a 2d histogram for a certain data container.
* @param binsX Number of bins on imaginary histogram's x-axis.
* @param binsY Number of bins on imaginary histogram's y-axis.
* @param minX Minimum value in first data container.
* @param maxX Maximum value in first data container.
* @param minY Minimum value in second data container.
* @param maxY Maximum value in second data container.
* @param beginX Iterator to the beginning of the first data container.
* @param endX Iterator to the end of the first data container.
* @param beginY Iterator to the beginning of the second data container.
* @param endY Iterator to the end of the second data container.
* Both containers must have the same size.
* @return Vector of index_pair structs. The vector has the same size as the data.
 * If there is a value outside the [min,max] range index_pair {INT_MAX, INT_MAX}
 * is inserted at this position.
*/
template<typename T, typename Iterator>
std::vector<index_pair> calculate_indices_2d(
const int binsX, const int binsY,
const T minX, const T maxX,
const T minY, const T maxY,
const Iterator beginX, const Iterator endX,
const Iterator beginY, const Iterator endY);
/**
* Calculate a certain range of mutual information by shifting the second data container
* in relation to the first one.
* No shift:
* |--------------------| dataX
* |--------------------| dataY
 * Negative shift (dataY advanced relative to dataX):
 *     |--------------------| dataX
 * |--------------------|     dataY
 * Positive shift (dataX advanced relative to dataY):
 * |--------------------|     dataX
 *     |--------------------| dataY
* @param shift_from Negative (or positive) value for specifying the largest shift to the left.
* @param shift_to Positive (of negative) value for specifying the largest shift to the right.
* Has to be greater than shift_from.
* @param minX Minimum value in first data container.
* @param maxX Maximum value in first data container.
* @param minY Minimum value in second data container.
* @param maxY Maximum value in second data container.
* @param beginX Iterator to the beginning of the first data container.
* @param endX Iterator to the end of the first data container.
* @param beginY Iterator to the beginning of the second data container.
* @param endY Iterator to the end of the second data container.
* Both containers must have the same size.
* @param shift_step (Optional) Specifies the steps between shifts. Default = 1.
* @return Vector with size (shift_to - shift_from) holding the mutual information for each shift.
* Might be smaller if shift_step is specified.
*/
template<typename T, typename Iterator>
std::vector<T> shifted_mutual_information(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const Iterator beginX, const Iterator endX,
const Iterator beginY, const Iterator endY,
const int shift_step = 1);
/**
* Calculates the mutual information of the two given data vectors X and Y
* by using bootstrapping. This is done by first generating nr_samples
* histograms by sampling the data and then again sampling these histograms
* and adding them together.
* Helper function for shifted_mutual_information_with_bootstrap.
*/
template<typename T, typename Iterator>
std::vector<T> bootstrapped_mi(const Iterator beginX, const Iterator endX,
const Iterator beginY, const Iterator endY,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
int nr_samples, int nr_repetitions, std::mt19937& rgen);
/**
* Similar to shifted_mutual_information but additionally uses bootstrapping
 * thus increasing its runtime. There are two additional parameters:
* @param nr_samples How many temporary histograms to generate by sampling the data.
* @param nr_repetitions How many times to repeat this process to minimize noise.
* @return A vector of size `(shift_to - shift_from) / shift_step + 1`
holding vectors of size `nr_repetitions` with the mutual information.
*/
template<typename T, typename Iterator>
std::vector< std::vector<T> > shifted_mutual_information_with_bootstrap(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const Iterator beginX, const Iterator endX,
const Iterator beginY, const Iterator endY,
int nr_samples, int nr_repetitions,
const int shift_step = 1);
/**
* This is for the matlab mex interface:
* Instead of returning a vector the result is written to a pointer location.
* The values specified by beginX, endX, beginY, endY are the histogram indices in range [0, nr_bins).
 * @param output A pointer to a vector of size (shift_to - shift_from) / shift_step + 1
*/
template<typename T>
void shifted_mutual_information(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const int* beginX, const int* endX,
const int* beginY, const int* endY,
const int shift_step,
T* output);
/**
* This is for the matlab mex interface:
* Instead of returning a vector the result is written to a pointer location.
* The values specified by beginX, endX, beginY, endY are the histogram indices in range [0, nr_bins).
 * @param output A pointer to a vector of size ((shift_to - shift_from) / shift_step + 1) * nr_repetitions
*/
template<typename T>
void shifted_mutual_information_with_bootstrap(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const int* beginX, const int* endX,
const int* beginY, const int* endY,
int nr_samples, int nr_repetitions,
const int shift_step,
T* output);
//////////////////
/// IMPLEMENTATION
//////////////////
template<typename T, typename Iterator>
std::vector<int> calculate_indices_1d(
    const int bins,
    const T min, const T max,
    const Iterator begin, const Iterator end)
{
    // Validate arguments before touching the data.
    if (min >= max)
        throw std::logic_error("min has to be smaller than max.");
    if (bins < 1)
        throw std::invalid_argument("There must be at least one bin.");
    const int size = std::distance(begin, end);
    std::vector<int> result(size);
    // Binning logic mirrors the Histogram1d class.
    #pragma omp parallel for
    for (int i = 0; i < size; ++i)
    {
        const auto v = begin[i];
        if (v >= min && v < max)
        {
            // Scale into [0, bins) and truncate to an integer bin index.
            result[i] = static_cast<int>((v - min) / (max - min) * bins);
        }
        else if (v == max)
        {
            // The maximum value belongs to the last (closed) bin.
            result[i] = bins - 1;
        }
        else
        {
            // Out-of-range marker (documented in the header comment above).
            result[i] = INT_MAX;
        }
    }
    return result;
}
template<typename T, typename Iterator>
std::vector<index_pair> calculate_indices_2d(
    const int binsX, const int binsY,
    const T minX, const T maxX,
    const T minY, const T maxY,
    const Iterator beginX, const Iterator endX,
    const Iterator beginY, const Iterator endY)
{
    // Validate arguments before touching the data.
    if (minX >= maxX)
        throw std::logic_error("minX has to be smaller than maxX.");
    if (minY >= maxY)
        throw std::logic_error("minY has to be smaller than maxY.");
    if (binsX < 1)
        throw std::invalid_argument("There must be at least one binX.");
    if (binsY < 1)
        throw std::invalid_argument("There must be at least one binY.");
    const int sizeX = std::distance(beginX, endX);
    const int sizeY = std::distance(beginY, endY);
    if (sizeX != sizeY)
        throw std::logic_error("Containers referenced by iterators must have the same size.");
    std::vector<index_pair> result(sizeX);
    // Binning logic mirrors the Histogram2d class.
    #pragma omp parallel for
    for (int i = 0; i < sizeX; ++i)
    {
        const auto x = beginX[i];
        const auto y = beginY[i];
        const bool in_range = x >= minX && x <= maxX && y >= minY && y <= maxY;
        if (in_range)
        {
            // A value equal to the maximum falls into the last (closed) bin;
            // everything else is scaled into [0, bins) and truncated.
            const int ix = (x == maxX)
                ? binsX - 1
                : static_cast<int>((x - minX) / (maxX - minX) * binsX);
            const int iy = (y == maxY)
                ? binsY - 1
                : static_cast<int>((y - minY) / (maxY - minY) * binsY);
            result[i] = index_pair{ ix, iy };
        }
        else
        {
            // Out-of-range marker (documented in the header comment above).
            result[i] = index_pair{ INT_MAX, INT_MAX };
        }
    }
    return result;
}
template<typename T>
inline void check_shifted_mutual_information(
    const size_t sizeX, const size_t sizeY,
    const int shift_from, const int shift_to,
    const int binsX, const int binsY,
    const T minX, const T maxX, const T minY, const T maxY, const int shift_step)
{
    // Shared precondition checks for every shifted_mutual_information variant.
    // Throws std::logic_error / std::invalid_argument. The order of the checks
    // determines which exception is reported first and is preserved.
    if (sizeX != sizeY)
        throw std::logic_error("Containers referenced by iterators must have the same size.");
    if (shift_from >= shift_to)
        throw std::logic_error("shift_from has to be smaller than shift_to.");
    if (minX >= maxX)
        throw std::logic_error("minX has to be smaller than maxX.");
    if (minY >= maxY)
        throw std::logic_error("minY has to be smaller than maxY.");
    if (binsX < 1)
        throw std::invalid_argument("There must be at least one binX.");
    if (binsY < 1)
        throw std::invalid_argument("There must be at least one binY.");
    // Both shift bounds must leave at least one overlapping sample.
    const size_t max_abs_shift = (size_t)(shift_to < 0 ? -shift_to : shift_to);
    if (max_abs_shift >= sizeX)
        throw std::logic_error("Maximum shift does not fit data size.");
    const size_t min_abs_shift = (size_t)(shift_from < 0 ? -shift_from : shift_from);
    if (min_abs_shift >= sizeX)
        throw std::logic_error("Minimum shift does not fit data size.");
    if (shift_step < 1)
        throw std::invalid_argument("shift_step must be greater or equal 1.");
}
template<typename T, typename Iterator>
std::vector<T> shifted_mutual_information(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const Iterator beginX, const Iterator endX,
const Iterator beginY, const Iterator endY,
const int shift_step /* 1 */)
{
size_t sizeX = std::distance(beginX, endX);
size_t sizeY = std::distance(beginY, endY);
// All argument validation (sizes, ranges, shift bounds) happens here.
check_shifted_mutual_information(sizeX, sizeY, shift_from, shift_to,
binsX, binsY, minX, maxX, minY, maxY, shift_step);
// Precompute the bin index of every sample once; each shift below then only
// re-pairs the two index streams instead of re-binning the raw data.
std::vector<int> indicesX = calculate_indices_1d(binsX, minX, maxX, beginX, endX);
std::vector<int> indicesY = calculate_indices_1d(binsY, minY, maxY, beginY, endY);
auto indX_begin = indicesX.begin();
auto indX_end = indicesX.end();
auto indY_begin = indicesY.begin();
auto indY_end = indicesY.end();
std::vector<T> result((shift_to - shift_from) / shift_step + 1);
// Each iteration writes a distinct result slot, so the loop is race-free.
#pragma omp parallel for
for (int i = shift_from; i <= shift_to; i += shift_step)
{
Histogram2d<T> hist(binsX, binsY, minX, maxX, minY, maxY);
if (i < 0)
{
// Negative shift: drop the last |i| X samples and the first |i| Y samples.
hist.increment_cpu(indX_begin, std::prev(indX_end, -i),
std::next(indY_begin, -i), indY_end);
}
else if (i > 0)
{
// Positive shift: drop the first i X samples and the last i Y samples.
hist.increment_cpu(std::next(indX_begin, i), indX_end,
indY_begin, std::prev(indY_end, i));
}
else // Should not be necessary but better be explicit.
{
hist.increment_cpu(indX_begin, indX_end,
indY_begin, indY_end);
}
result[(i - shift_from) / shift_step] = *hist.calculate_mutual_information();
}
return result;
}
/**
 * Bootstrapped mutual information (see declaration above): build nr_samples
 * histograms from randomly sampled (X, Y) index pairs, then nr_repetitions
 * times resample and sum those histograms and compute the MI of the sum.
 * Throws std::logic_error if the two ranges differ in size.
 * NOTE(review): assumes nr_samples >= 1 and a non-empty range — callers
 * (shifted_mutual_information_with_bootstrap) validate this; verify if adding
 * new call sites, since sizeX == 0 would make the distribution bounds invalid.
 */
template<typename T, typename Iterator>
std::vector<T> bootstrapped_mi(const Iterator beginX, const Iterator endX,
                               const Iterator beginY, const Iterator endY,
                               const int binsX, const int binsY,
                               const T minX, const T maxX, const T minY, const T maxY,
                               int nr_samples, int nr_repetitions, std::mt19937& rgen)
{
    size_t sizeX = std::distance(beginX, endX);
    size_t sizeY = std::distance(beginY, endY);
    if (sizeX != sizeY)
        throw std::logic_error("Containers referenced by iterators must have the same size.");
    std::uniform_int_distribution<int> uniform(0, sizeX - 1);
    // Owning smart pointers: the previous raw-pointer version leaked every
    // histogram if increment_at, add, or calculate_mutual_information threw
    // before the manual delete loop ran. unique_ptr releases them on any exit.
    std::vector< std::unique_ptr< Histogram2d<T> > > hist3d(nr_samples);
    int nr_samples_per_histogram = sizeX / nr_samples;
    // First create some histograms from randomly sampled data pairs.
    for (int sample = 0; sample < nr_samples; ++sample)
    {
        hist3d[sample].reset(new Histogram2d<T>(binsX, binsY, minX, maxX, minY, maxY));
        for (int i = 0; i < nr_samples_per_histogram; ++i)
        {
            int ridx = uniform(rgen);
            hist3d[sample]->increment_at(beginX[ridx], beginY[ridx]);
        }
    }
    // Now sample these histograms again and add them together.
    std::uniform_int_distribution<int> uniform_from_samples(0, nr_samples - 1);
    std::vector<T> results(nr_repetitions);
    for (int i = 0; i < nr_repetitions; ++i)
    {
        Histogram2d<T> final_hist(binsX, binsY, minX, maxX, minY, maxY);
        for (int sample = 0; sample < nr_samples; ++sample)
        {
            int sampleidx = uniform_from_samples(rgen);
            final_hist.add(*hist3d[sampleidx]);
        }
        results[i] = *final_hist.calculate_mutual_information();
    }
    // No manual cleanup needed: each unique_ptr frees its histogram here.
    return results;
}
template<typename T, typename Iterator>
std::vector< std::vector<T> > shifted_mutual_information_with_bootstrap(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const Iterator beginX, const Iterator endX,
const Iterator beginY, const Iterator endY,
int nr_samples, int nr_repetitions,
const int shift_step /* 1 */)
{
size_t sizeX = std::distance(beginX, endX);
size_t sizeY = std::distance(beginY, endY);
// Shared validation plus the two bootstrap-specific checks below.
check_shifted_mutual_information(sizeX, sizeY, shift_from, shift_to,
binsX, binsY, minX, maxX, minY, maxY, shift_step);
if (nr_samples < 1)
throw std::logic_error("For bootstrapping you need a minimum of one sample.");
if (nr_repetitions < 1)
throw std::logic_error("There needs to be at least one repetition of the bootstrapping process.");
// Bin every sample once; each shift re-pairs the two index streams.
std::vector<int> indicesX = calculate_indices_1d(binsX, minX, maxX, beginX, endX);
std::vector<int> indicesY = calculate_indices_1d(binsY, minY, maxY, beginY, endY);
auto indX_begin = indicesX.begin();
auto indX_end = indicesX.end();
auto indY_begin = indicesY.begin();
auto indY_end = indicesY.end();
std::vector< std::vector<T> > result((shift_to - shift_from) / shift_step + 1);
// Each iteration writes a distinct result slot, so the loop is race-free.
#pragma omp parallel for
for (int i = shift_from; i <= shift_to; i += shift_step)
{
// Per-iteration RNG seeded from the clock.
// NOTE(review): iterations starting within the same clock tick may get
// identical seeds and thus correlated bootstrap samples — consider seeding
// from a seed sequence plus the loop index; verify whether this matters here.
unsigned int seed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
std::mt19937 rgen(seed);
std::vector<T> mi;
if (i < 0)
{
// Negative shift: drop the last |i| X samples and the first |i| Y samples.
mi = bootstrapped_mi<T>(indX_begin, std::prev(indX_end, -i),
std::next(indY_begin, -i), indY_end,
binsX, binsY, minX, maxX, minY, maxY, nr_samples, nr_repetitions, rgen);
}
else if (i > 0)
{
// Positive shift: drop the first i X samples and the last i Y samples.
mi = bootstrapped_mi<T>(std::next(indX_begin, i), indX_end,
indY_begin, std::prev(indY_end, i),
binsX, binsY, minX, maxX, minY, maxY, nr_samples, nr_repetitions, rgen);
}
else // Should not be necessary but better be explicit.
{
mi = bootstrapped_mi<T>(indX_begin, indX_end,
indY_begin, indY_end,
binsX, binsY, minX, maxX, minY, maxY, nr_samples, nr_repetitions, rgen);
}
result[(i - shift_from) / shift_step] = mi;
}
return result;
}
template<typename T>
void shifted_mutual_information(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const int* beginX, const int* endX,
const int* beginY, const int* endY,
const int shift_step,
T* output)
{
// Mex-interface variant: inputs are precomputed histogram indices in
// [0, nr_bins) (see header comment), results go to the caller's buffer.
// output must hold (shift_to - shift_from) / shift_step + 1 elements.
size_t sizeX = std::distance(beginX, endX);
size_t sizeY = std::distance(beginY, endY);
check_shifted_mutual_information(sizeX, sizeY, shift_from, shift_to,
binsX, binsY, minX, maxX, minY, maxY, shift_step);
// Each iteration writes a distinct output slot, so the loop is race-free.
#pragma omp parallel for
for (int i = shift_from; i <= shift_to; i += shift_step)
{
Histogram2d<T> hist(binsX, binsY, minX, maxX, minY, maxY);
if (i < 0)
{
// Negative shift: drop the last |i| X samples and the first |i| Y samples.
hist.increment_cpu(beginX, std::prev(endX, -i),
std::next(beginY, -i), endY);
}
else if (i > 0)
{
// Positive shift: drop the first i X samples and the last i Y samples.
hist.increment_cpu(std::next(beginX, i), endX,
beginY, std::prev(endY, i));
}
else // Should not be necessary but better be explicit.
{
hist.increment_cpu(beginX, endX,
beginY, endY);
}
output[(i - shift_from) / shift_step] = *hist.calculate_mutual_information();
}
}
template<typename T>
void shifted_mutual_information_with_bootstrap(
const int shift_from, const int shift_to,
const int binsX, const int binsY,
const T minX, const T maxX, const T minY, const T maxY,
const int* beginX, const int* endX,
const int* beginY, const int* endY,
int nr_samples, int nr_repetitions,
const int shift_step,
T* output)
{
// Mex-interface variant of the bootstrapped computation: inputs are
// precomputed histogram indices, results are written row-major into output
// (((shift_to - shift_from) / shift_step + 1) * nr_repetitions elements).
size_t sizeX = std::distance(beginX, endX);
size_t sizeY = std::distance(beginY, endY);
check_shifted_mutual_information(sizeX, sizeY, shift_from, shift_to,
binsX, binsY, minX, maxX, minY, maxY, shift_step);
if (nr_samples < 1)
throw std::logic_error("For bootstrapping you need a minimum of one sample.");
if (nr_repetitions < 1)
throw std::logic_error("There needs to be at least one repetition of the bootstrapping process.");
// Each iteration writes a distinct output stripe, so the loop is race-free.
#pragma omp parallel for
for (int i = shift_from; i <= shift_to; i += shift_step)
{
// Per-iteration RNG seeded from the clock.
// NOTE(review): iterations starting within the same clock tick may get
// identical seeds — see the iterator-based overload above for the same caveat.
unsigned int seed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
std::mt19937 rgen(seed);
std::vector<T> mi;
if (i < 0)
{
// Negative shift: drop the last |i| X samples and the first |i| Y samples.
mi = bootstrapped_mi<T>(beginX, std::prev(endX, -i),
std::next(beginY, -i), endY,
binsX, binsY, minX, maxX, minY, maxY, nr_samples, nr_repetitions, rgen);
}
else if (i > 0)
{
// Positive shift: drop the first i X samples and the last i Y samples.
mi = bootstrapped_mi<T>(std::next(beginX, i), endX,
beginY, std::prev(endY, i),
binsX, binsY, minX, maxX, minY, maxY, nr_samples, nr_repetitions, rgen);
}
else // Should not be necessary but better be explicit.
{
mi = bootstrapped_mi<T>(beginX, endX,
beginY, endY,
binsX, binsY, minX, maxX, minY, maxY, nr_samples, nr_repetitions, rgen);
}
for (int j = 0; j < nr_repetitions; ++j) // Copy result to output.
{
output[((i - shift_from) / shift_step) * nr_repetitions + j] = mi[j];
}
}
} |
ioc-ummap-bandwidth-mpi.c | //mpicc ioc-ummap-bandwidth-mpi.c -I$HOME/test-rdma/usr2/include -lioc-client -lummap-io -L$HOME/test-rdma/usr2/lib -Wl,-rpath,$HOME/test-rdma/usr2/lib -o ioc-ummap-bandwidth-mpi
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <ioc-client.h>
#include <time.h>
#include <mpi.h>
#include <ummap/ummap.h>
#include <omp.h>
const size_t total_size = 4UL*1024UL*1024UL*1024UL;
const size_t ref_repeat = 10;
/* Difference a - b expressed in seconds, normalizing a borrow from the
 * nanosecond field so the fractional part is always non-negative. */
static inline double timespec_diff(struct timespec *a, struct timespec *b) {
    time_t sec = a->tv_sec - b->tv_sec;
    long nsec = a->tv_nsec - b->tv_nsec;
    if (nsec < 0) {
        sec -= 1;
        nsec += 1000000000L;
    }
    return (double)sec + (double)nsec / (double)1e9;
}
/* Benchmark ummap-backed reads: map this rank's slice of object (10,20)
 * and touch one byte per segment, `repeat` times over.
 * buffer0 is unused here; it is kept so the signature matches the op()
 * callback expected by calc_bandwidth(). */
void make_ummap_read(ioc_client_t * client, char * buffer0, size_t size, size_t seg_size, size_t repeat)
{
    (void)buffer0;
    /* get MPI rank */
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    size_t threads = omp_get_max_threads();
    /* this rank's byte offset inside the shared object */
    size_t base = rank * size;
    /* map the object through ummap; FIFO eviction keeps 2 segments per thread */
    ummap_driver_t * driver = ummap_driver_create_ioc(client, 10, 20, rank == 0);
    ummap_policy_t * policy = ummap_policy_create_fifo(2 * threads * seg_size, true);
    int flags = 0;
    if (seg_size <= 131072)
        flags |= UMMAP_THREAD_UNSAFE;
    char * buffer = ummap(NULL, size, seg_size, base, PROT_READ|PROT_WRITE, flags, driver, policy, NULL);
    /* touch one byte per segment to force each segment to be fetched */
    size_t r;
    size_t offset;
    size_t sum = 0;
    for (r = 0 ; r < repeat ; r++) {
        /* reduction(+:sum): the original unsynchronized `sum +=` from all
         * threads was a data race (undefined behavior) */
        #pragma omp parallel for reduction(+:sum)
        for (offset = 0 ; offset < size ; offset += seg_size)
            sum += buffer[offset];
    }
    /* unmap; second argument false as in the write path — presumably "no
     * sync back" — confirm against the ummap-io API */
    umunmap(buffer, false);
}
/* Benchmark ummap-backed writes: map this rank's slice of object (10,20)
 * and bump one byte per segment, `repeat` times over.
 * buffer0 is unused; kept for the common op() signature of calc_bandwidth(). */
void make_ummap_write(ioc_client_t * client, char * buffer0, size_t size, size_t seg_size, size_t repeat)
{
    /* MPI rank selects this rank's slice of the object */
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    size_t nb_threads = omp_get_max_threads();
    size_t base = rank * size;
    /* map the object through ummap with the write-oriented flags */
    ummap_driver_t * drv = ummap_driver_create_ioc(client, 10, 20, rank == 0);
    ummap_policy_t * pol = ummap_policy_create_fifo(2 * nb_threads * seg_size, true);
    int map_flags = UMMAP_NO_FIRST_READ;
    if (seg_size <= 131072)
        map_flags |= UMMAP_THREAD_UNSAFE;
    char * buffer = ummap(NULL, size, seg_size, base, PROT_READ|PROT_WRITE, map_flags, drv, pol, NULL);
    /* write one byte per segment on every repetition */
    for (size_t r = 0 ; r < repeat ; r++) {
        ummap_skip_first_read(buffer);
        size_t offset;
        #pragma omp parallel for
        for (offset = 0 ; offset < size ; offset += seg_size)
            buffer[offset]++;
    }
    /* unmap without syncing back */
    umunmap(buffer, false);
}
/* Raw ioc write path: issue seg_size-sized writes covering this rank's
 * slice of object (10,20), `repeat` times over. */
void make_write(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    const size_t base = rank * size;
    for (size_t r = 0 ; r < repeat ; r++) {
        for (size_t offset = 0 ; offset < size ; offset += seg_size)
            ioc_client_obj_write(client, 10, 20, buffer, seg_size, base + offset);
    }
}
/* Raw ioc read path: issue seg_size-sized reads covering this rank's
 * slice of object (10,20), `repeat` times over. */
void make_read(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    const size_t base = rank * size;
    for (size_t r = 0 ; r < repeat ; r++) {
        for (size_t offset = 0 ; offset < size ; offset += seg_size)
            ioc_client_obj_read(client, 10, 20, buffer, seg_size, base + offset);
    }
}
/* Run op() on every rank between two barriers and return the aggregate
 * bandwidth in GB/s.  The volume is `repeat * total_size` (the global
 * object size), timed with the monotonic clock. */
double calc_bandwidth(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat, void(*op)(ioc_client_t * client, char * buffer, size_t size, size_t seg_size, size_t repeat))
{
    struct timespec t0, t1;
    /* all ranks start together */
    MPI_Barrier(MPI_COMM_WORLD);
    clock_gettime(CLOCK_MONOTONIC, &t0);
    op(client, buffer, size, seg_size, repeat);
    /* wait for the slowest rank before stopping the clock */
    MPI_Barrier(MPI_COMM_WORLD);
    clock_gettime(CLOCK_MONOTONIC, &t1);
    double elapsed = timespec_diff(&t1, &t0);
    /* convert to GB/s (dividing by powers of two is exact in binary FP) */
    const double gib = 1024.0 * 1024.0 * 1024.0;
    return (double)repeat * (double)total_size / gib / elapsed;
}
/* Benchmark driver: measures ioc/ummap read and write bandwidth for a
 * range of segment sizes; each MPI rank owns one slice of the shared
 * 4 GB object.  Usage: ioc-ummap-bandwidth-mpi {ioc_server_ip} */
int main(int argc, char ** argv)
{
    /* check args */
    if (argc < 2) {
        fprintf(stderr, "%s {ioc_server_ip}\n", argv[0]);
        return EXIT_FAILURE;
    }
    /* init MPI */
    MPI_Init(&argc, &argv);
    /* init ummap-io */
    ummap_init();
    /* get MPI infos */
    int rank;
    int world;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world);
    /* connect to server */
    ioc_client_t * client = ioc_client_init(argv[1], "8556");
    /* per-rank slice size */
    size_t size = total_size / world;
    /* allocate reference buffer (the original used malloc unchecked) */
    char * buffer = malloc(size);
    if (buffer == NULL) {
        fprintf(stderr, "Failed to allocate %zu bytes\n", size);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    memset(buffer, 0, size);
    /* warm-up round trips so the remote object exists before measuring */
    calc_bandwidth(client, buffer, size, 8*1024*1024, ref_repeat, make_ummap_read);
    calc_bandwidth(client, buffer, size, 8*1024*1024, ref_repeat, make_ummap_write);
    /* header */
    if (rank == 0) {
        printf("#total_size=%f GB\n", (double)total_size/1024.0/1024.0/1024.0);
        printf("#world_size=%d\n", world);
        printf("#seg_size (bytes) read (GB/s) twrite(GB/s)\n");
    }
    /* sweep segment sizes from 16 MB down to 4 KB */
    size_t seg_size = 16 * 1024 * 1024;
    for ( ; seg_size >= 4096 ; seg_size /= 2) {
        size_t repeat = ref_repeat;
        /* measure via the ummap-backed paths (make_read/make_write are the
         * raw ioc equivalents) */
        double read_bw = calc_bandwidth(client, buffer, size, seg_size, repeat, make_ummap_read);
        double write_bw = calc_bandwidth(client, buffer, size, seg_size, repeat, make_ummap_write);
        /* print one result line per segment size */
        if (rank == 0)
            printf("%zu %f %f\n", seg_size, read_bw, write_bw);
    }
    /* release the reference buffer (the original leaked it) */
    free(buffer);
    /* close connection */
    ioc_client_fini(client);
    /* fini ummap */
    ummap_finalize();
    /* fini mpi */
    MPI_Finalize();
    return EXIT_SUCCESS;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * NOTE: normalizes *y in place as a side effect (the classic GNU libc
 * manual idiom).  Returns 1 when the difference is negative, else 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow into y so that x->tv_usec - y->tv_usec stays in range. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is certainly non-negative at this point. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point variable-coefficient stencil.
 * Usage: ./3d7pt_var Nx Ny Nz [Nt]; each spatial extent gets +2 halo
 * layers.  Runs TESTS timed repetitions and reports the minimum time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults so the sizes are never read uninitialized (the original left
   * Nx..Nt indeterminate when fewer arguments were given — UB). */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds two time buffers, coef the 7 coefficients
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 24;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Initialize BOTH time buffers over the FULL range including the halo:
   * the stencil reads indices 0 and N-1 of A[t%2], and the original never
   * wrote A[1] (nor any halo cell), so those reads were uninitialized. */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* was `min(...)`: only the MIN macro is defined, so the original did
     * not compile */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the top-level pointers, which the
  // original leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
bench.c | #include "omp.h"
#include "pmsis.h"
#define LOOP_ITER (2048)
#define NB_ITER (256)
#define NB_BARRIER_ITER (256)
#define NB_ITER_SINGLE (128)
#define CORE_ID pi_core_id()
#define PRINTF(...)
//#define PRINTF(...) printf(__VA_ARGS__)
// Arm the cluster cycle counter: reset it, select the CYCLES event, start it.
static void test_start_timer()
{
pi_perf_cl_reset();
pi_perf_conf(1<<PI_PERF_CYCLES);
pi_perf_cl_start();
}
// Clear the cluster cycle counter without starting it.
static void test_reset_timer()
{
pi_perf_cl_reset();
}
// Read the current value of the cluster cycle counter.
static unsigned int test_get_time()
{
return pi_perf_cl_read(PI_PERF_CYCLES);
}
// (Re)start the cycle counter.  The returned 0 is a dummy "start" token:
// getTimer() ignores it and reads the counter directly.
static inline unsigned int startTimer() {
PRINTF("Starting timer\n");
test_reset_timer();
test_start_timer();
return 0;
}
// Cycles elapsed since startTimer().  `start` is unused because the
// counter itself was reset to zero when the timer started.
static inline unsigned int getTimer(unsigned int start)
{
PRINTF("Ending timer\n");
return test_get_time();
}
// Measure the cost of "#pragma omp barrier": thread 0 times
// NB_BARRIER_ITER barriers and reports the average cycles per barrier.
void test_barrier(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads) shared(nthreads)
{
unsigned int start; // only written/read by thread 0 (see below)
int i;
float operation_cost = 0;
if (omp_get_thread_num() == 0) {
start = startTimer();
}
for (i = 0; i < NB_BARRIER_ITER; i++)
{
#pragma omp barrier
}
if (omp_get_thread_num() == 0) {
unsigned int end = getTimer(start);
operation_cost = (float) end / NB_BARRIER_ITER;
printf("BARRIER %d threads: %f cycles\n", nthreads, operation_cost);
}
}
}
// Measure the cost of "#pragma omp critical": every thread contends on an
// empty critical section NB_ITER times; core 0 reports the average cycles
// per entry.
void test_critical(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads)
{
int i;
// NOTE(review): every thread calls startTimer(), which resets the shared
// HW counter — the measurement window is effectively the last reset.
unsigned int start = startTimer();
float operation_cost = 0;
for (i = 0; i < NB_ITER; i++)
{
#pragma omp critical
{
volatile int a = 0; // volatile keeps the empty body from being elided
}
}
#pragma omp barrier
operation_cost = (float) getTimer(start) / NB_ITER;
if (CORE_ID == 0) {
printf("CRITICAL %d threads: %.3f cycles\n", nthreads, operation_cost);
}
}
}
// Measure the per-iteration cost of a statically scheduled parallel for:
// NB_ITER parallel regions of LOOP_ITER trivial iterations each.
void test_parallel_loop_static(unsigned int nthreads)
{
int i;
int j;
unsigned int start = startTimer();
float iteration_cost = 0;
for (i = 0; i < NB_ITER; i++)
{
#pragma omp parallel for num_threads(nthreads)
for (j = 0; j < LOOP_ITER; j++)
{
volatile int a = j; // volatile keeps the loop body from being elided
}
}
iteration_cost = ((float) getTimer(start)/(NB_ITER * LOOP_ITER));
printf("PARALLEL FOR %d threads STATIC %d iter: %.3f cycle(s) per iteration\n", nthreads, LOOP_ITER, iteration_cost);
}
/* Measure the per-construct cost of "#pragma omp single".
 * The loop enters the single region NB_ITER times, so the cost is the
 * elapsed cycles divided by NB_ITER.  (The original divided by
 * NB_ITER * LOOP_ITER and reported LOOP_ITER — a copy-paste from the
 * parallel-for test — which understated the cost by a factor of
 * LOOP_ITER.)  Unused loop variable `j` removed. */
void test_parallel_single(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads)
{
    int i;
    unsigned int start = startTimer();
    float iteration_cost = 0;
    for (i = 0; i < NB_ITER; i++)
    {
#pragma omp single
        {
            volatile int a = 0; /* volatile keeps the empty body alive */
        }
    }
    if (omp_get_thread_num() == 0)
    {
        iteration_cost = ((float) getTimer(start) / NB_ITER);
        printf("PARALLEL SINGLE %d threads STATIC %d iter: %.3f cycle(s) per iteration\n",
               nthreads,
               NB_ITER,
               iteration_cost);
    }
}
}
// Cluster-side entry: run every benchmark for 1..N processing cores.
void test_entry()
{
for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
{
test_barrier(i);
}
printf("\n");
for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
{
test_critical(i);
}
printf("\n");
for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
{
test_parallel_loop_static (i);
}
printf("\n");
for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
{
test_parallel_single(i);
}
}
// Fabric-controller side: open the cluster, dispatch test_entry() to it,
// then close the cluster and exit with the error count.
void launch_test(void)
{
printf("Entering main controller\n");
uint32_t errors = 0;
uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
struct pi_device cluster_dev;
struct pi_cluster_conf cl_conf;
/* Init cluster configuration structure. */
pi_cluster_conf_init(&cl_conf);
cl_conf.id = 0;                /* Set cluster ID. */
/* Configure & open cluster. */
pi_open_from_conf(&cluster_dev, &cl_conf);
if (pi_cluster_open(&cluster_dev))
{
printf("Cluster open failed !\n");
pmsis_exit(-1);
}
/* Prepare cluster task and send it to cluster. */
struct pi_cluster_task cl_task;
pi_cluster_task(&cl_task, test_entry, NULL);
pi_cluster_send_task_to_cl(&cluster_dev, &cl_task);
pi_cluster_close(&cluster_dev);
printf("Test success !\n");
pmsis_exit(errors);
}
/* Program Entry: hand control to the pmsis runtime, which runs
 * launch_test() on the fabric controller. */
int main(void)
{
printf("\n\n\t *** OpenMP Benchmark ***\n\n");
return pmsis_kickoff((void *) launch_test);
}
|
builder.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef BUILDER_H_
#define BUILDER_H_
#include <algorithm>
#include <parallel/algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>
#include <omp.h>
#include <cassert>
#include <vector>
#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"
#include "sliding_queue.h"
/*
GAP Benchmark Suite
Class: BuilderBase
Author: Scott Beamer
Given arguements from the command line (cli), returns a built graph
- MakeGraph() will parse cli and obtain edgelist and call
MakeGraphFromEL(edgelist) to perform actual graph construction
- edgelist can be from file (reader) or synthetically generated (generator)
- Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/
template <typename NodeID_, typename DestID_ = NodeID_,
typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
typedef EdgePair<NodeID_, DestID_> Edge;
typedef pvector<Edge> EdgeList;
const CLBase &cli_;
bool symmetrize_;
bool needs_weights_;
int64_t num_nodes_ = -1;
public:
// Record CLI options; weights must be synthesized when the destination
// type carries a weight (NodeID_ != DestID_).
explicit BuilderBase(const CLBase &cli) : cli_(cli) {
symmetrize_ = cli_.symmetrize();
needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
}
// Source endpoint of an unweighted edge.
DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
return e.u;
}
// Source endpoint of a weighted edge, re-paired with the edge's weight so
// the transposed edge keeps that weight.
DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
}
// Largest node ID appearing on either endpoint of any edge in the list.
NodeID_ FindMaxNodeID(const EdgeList &el) {
    NodeID_ max_seen = 0;
    #pragma omp parallel for reduction(max : max_seen)
    for (auto it = el.begin(); it < el.end(); it++) {
        const Edge edge = *it;
        if (edge.u > max_seen)
            max_seen = edge.u;
        const NodeID_ dest = (NodeID_) edge.v;
        if (dest > max_seen)
            max_seen = dest;
    }
    return max_seen;
}
// Per-node degree counts for the edge list: `transpose` counts incoming
// edges, otherwise outgoing; symmetrized graphs count both directions.
pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
    pvector<NodeID_> degrees(num_nodes_, 0);
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
        const Edge e = *it;
        // symmetrize_ || (!symmetrize_ && !transpose) simplifies to:
        if (symmetrize_ || !transpose)
            fetch_and_add(degrees[e.u], 1);
        if (symmetrize_ || transpose)
            fetch_and_add(degrees[(NodeID_) e.v], 1);
    }
    return degrees;
}
// Serial exclusive prefix sum: sums[i] = degrees[0..i); the extra last
// slot holds the grand total.
static
pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) {
    pvector<SGOffset> sums(degrees.size() + 1);
    SGOffset running = 0;
    for (size_t n = 0; n < degrees.size(); n++) {
        sums[n] = running;
        running += degrees[n];
    }
    sums[degrees.size()] = running;
    return sums;
}
// Parallel exclusive prefix sum of degrees, in three phases:
//   1) each 1M-element block computes its partial sum in parallel,
//   2) a serial scan turns block sums into per-block base offsets,
//   3) each block rescans locally, offset by its base.
// prefix[i] = sum of degrees[0..i); prefix[degrees.size()] = grand total.
static
pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) {
const size_t block_size = 1<<20;
const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
pvector<SGOffset> local_sums(num_blocks);
#pragma omp parallel for
for (size_t block=0; block < num_blocks; block++) {
SGOffset lsum = 0;
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i=block * block_size; i < block_end; i++)
lsum += degrees[i];
local_sums[block] = lsum;
}
// Serial scan of per-block totals (cheap: one entry per block).
pvector<SGOffset> bulk_prefix(num_blocks+1);
SGOffset total = 0;
for (size_t block=0; block < num_blocks; block++) {
bulk_prefix[block] = total;
total += local_sums[block];
}
bulk_prefix[num_blocks] = total;
pvector<SGOffset> prefix(degrees.size() + 1);
#pragma omp parallel for
for (size_t block=0; block < num_blocks; block++) {
SGOffset local_total = bulk_prefix[block];
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i=block * block_size; i < block_end; i++) {
prefix[i] = local_total;
local_total += degrees[i];
}
}
prefix[degrees.size()] = bulk_prefix[num_blocks];
return prefix;
}
// Removes self-loops and redundant edges
// Side effect: neighbor IDs will be sorted
// Two passes: (1) sort + dedup each neighbor list in place and record the
// surviving length; (2) prefix-sum those lengths and copy the surviving
// prefix of each list into the freshly allocated CSR arrays.
void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
DestID_*** sq_index, DestID_** sq_neighs) {
pvector<NodeID_> diffs(g.num_nodes()); // squished degree of each node
DestID_ *n_start, *n_end;
#pragma omp parallel for private(n_start, n_end)
for (NodeID_ n=0; n < g.num_nodes(); n++) {
if (transpose) {
n_start = g.in_neigh(n).begin();
n_end = g.in_neigh(n).end();
} else {
n_start = g.out_neigh(n).begin();
n_end = g.out_neigh(n).end();
}
std::sort(n_start, n_end);
DestID_ *new_end = std::unique(n_start, n_end);
new_end = std::remove(n_start, new_end, n); // drop the self-loop n -> n
diffs[n] = new_end - n_start;
}
pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
*sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
*sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
#pragma omp parallel for private(n_start)
for (NodeID_ n=0; n < g.num_nodes(); n++) {
if (transpose)
n_start = g.in_neigh(n).begin();
else
n_start = g.out_neigh(n).begin();
std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
}
}
// Returns a copy of g with self-loops and duplicate edges removed
// (neighbor lists come back sorted — see SquishCSR).
CSRGraph<NodeID_, DestID_, invert> SquishGraph(
    const CSRGraph<NodeID_, DestID_, invert> &g) {
  // Initialize to nullptr: the original passed in_index/in_neighs to the
  // CSRGraph constructor *indeterminate* when g is directed but invert is
  // false (undefined behavior).
  DestID_ **out_index = nullptr, *out_neighs = nullptr;
  DestID_ **in_index = nullptr, *in_neighs = nullptr;
  SquishCSR(g, false, &out_index, &out_neighs);
  if (g.directed()) {
    if (invert)
      SquishCSR(g, true, &in_index, &in_neighs);
    return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                              out_neighs, in_index,
                                              in_neighs);
  } else {
    return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                              out_neighs);
  }
}
/*
Graph Bulding Steps (for CSR):
- Read edgelist once to determine vertex degrees (CountDegrees)
- Determine vertex offsets by a prefix sum (ParallelPrefixSum)
- Allocate storage and set points according to offsets (GenIndex)
- Copy edges into storage
*/
// Fill the CSR arrays for the edge list.  Edge placement bumps the offsets
// copy with fetch_and_add so each edge claims a unique slot in parallel.
// `transpose` builds the incoming-edge CSR; symmetrized graphs insert both
// directions of every edge.
void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
DestID_** neighs) {
pvector<NodeID_> degrees = CountDegrees(el, transpose);
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
*neighs = new DestID_[offsets[num_nodes_]];
*index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
(*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
if (symmetrize_ || (!symmetrize_ && transpose))
(*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
GetSource(e);
}
}
// Build the CSR graph from an edge list: determine num_nodes_ if not set,
// synthesize weights when the destination type needs them, and — for
// directed graphs with invert — also build the transposed CSR so
// in-neighbors can be iterated.  Reports the build time.
CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
DestID_ **index = nullptr, **inv_index = nullptr;
DestID_ *neighs = nullptr, *inv_neighs = nullptr;
Timer t;
t.Start();
if (num_nodes_ == -1)
num_nodes_ = FindMaxNodeID(el)+1;
//#if 0 //TEMP
if (needs_weights_)
Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
//#endif
MakeCSR(el, false, &index, &neighs);
// Transposed CSR only needed when directions are kept separate.
if (!symmetrize_ && invert)
MakeCSR(el, true, &inv_index, &inv_neighs);
t.Stop();
PrintTime("Build Time", t.Seconds());
if (symmetrize_)
return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
else
return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
inv_index, inv_neighs);
}
#if 0 //will complete the code later
CSRGraph<NodeID_, DestID_, invert> relabelForSpatialLocality(
const CSRGraph<NodeID_, DestID_, invert> &g) {
if (g.directed()) {
// Will add support soon
}
else {
Timer t;
t.start();
/* STEP I: make a map between new and old vertex labels */
long long counter = 0; //keep track of local counts
std::map<NodeID_, int64_t> reMap[128]; //Conservatively assuming we will never use more than 128 threads
/* relabel vertices in parallel (using local counter) */
#pragma omp parallel for firstprivate(count)
for (NodeID_ v = 0; v < g.num_nodes(); v++) {
if (reMap[omp_get_thread_num()].find(v) == reMap.end()) {
// vertex hasn't been labelled
reMap.insert(std::pair<NodeID_, int64_t>(v, counter));
counter++;
}
for (NodeID_ u : g.in_neigh(v)) {
if (reMap[omp_get_thread_num()].find(u) == reMap.end()) {
// vertex hasn't been labelled
reMap.insert(std::pair<NodeID_, int64_t>(u, counter));
counter++;
}
}
}
/* Update counts based on maximum count for each thread */
int64_t offset = 0;
for (int i = 0; i < 128; i++) {
if (reMap[i].size() != 0) {
// adding offset to all counts of current map
std::map<NodeID_, int64_t>::iterator it, it_end;
#pragma omp parallel for
for (it = reMap[i].begin(), it_end = reMap[i].end(); it != it_end; it++) {
it->second += offset;
}
// finding maximum value of current set
int64_t maxVal = 0;
#pragma omp parallel for reduction(max: maxVal)
for (it = reMap[i].begin(), it_end = reMap[i].end(); it != it_end; it++) {
if (it->second > maxVal) {
maxVal = it->second;
}
}
offset = maxVal;
}
}
/* Merge local containers */
std::map <NodeID_, int64_t> merged_reMap;
for (int i = 0; i < 128; i++) {
if (reMap[i].size() != 0) {
merged_reMap.insert(reMap[i].begin(), reMap[i].end());
}
}
/* STEP II: rewrite CSR based on this reMap */
DestID_* neighs = new DestID_[2 * g.num_edges()];
DestID_** index = CSRGraph<NodeID_, DestID_>::relabelIndex(offsets, neighs, reMap);
}
}
#endif
// Top-level entry point: obtain an edge list (from a file via Reader, or
// synthetically via Generator), build the CSR graph, then squish it
// (sort/dedup neighbor lists).  Serialized .sg/.wsg inputs are returned
// directly, bypassing construction.
CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
CSRGraph<NodeID_, DestID_, invert> g;
{ // extra scope to trigger earlier deletion of el (save memory)
EdgeList el;
if (cli_.filename() != "") {
Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) {
return r.ReadSerializedGraph();
} else {
el = r.ReadFile(needs_weights_);
}
} else if (cli_.scale() != -1) {
Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
el = gen.GenerateEL(cli_.uniform());
}
g = MakeGraphFromEL(el);
}
#if 0
if (cli_.relabel() == 1) {
g_new = relabelForSpatialLocality(g);
}
#endif
return SquishGraph(g);
}
// Relabels (and rebuilds) graph by order of decreasing degree
// Undirected graphs only: pairs each node with its degree, sorts the pairs
// descending, then rebuilds the CSR under the new ids.
static
CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
const CSRGraph<NodeID_, DestID_, invert> &g) {
if (g.directed()) {
std::cout << "Cannot relabel directed graph" << std::endl;
std::exit(-11);
}
Timer t;
t.Start();
typedef std::pair<int64_t, NodeID_> degree_node_p;
pvector<degree_node_p> degree_id_pairs(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++)
degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
std::greater<degree_node_p>());
// degrees[i]: degree of the node relabeled to i; new_ids: old id -> new id.
pvector<NodeID_> degrees(g.num_nodes());
pvector<NodeID_> new_ids(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++) {
degrees[n] = degree_id_pairs[n].first;
new_ids[degree_id_pairs[n].second] = n;
}
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for schedule (dynamic, 1024)
for (NodeID_ u=0; u < g.num_nodes(); u++) {
for (NodeID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
// index[new_ids[u]+1] is the start of the next node's list, i.e. the end
// of this node's list: sort each relabeled neighbor list.
std::sort(index[new_ids[u]], index[new_ids[u]+1]);
}
t.Stop();
PrintTime("Relabel", t.Seconds());
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
}
// cheap version of MIT's "frequency based clustering" approach. Instead of sorting by
// degree order, this version only ensures that all the hub vertices are clustered at one
// end of the array
// Packs "hub" vertices (degree > cutoffFactor * average degree) at the
// front of the id space without a full degree sort.
// Contract: new_ids must arrive filled with -1; on return it maps
// old id -> new id.  Remapping proceeds in five steps: (I) count hubs per
// thread partition, (II) give hubs the leading ids, (III) non-hubs keep
// their own id when free or swap with the hub that took it, queueing the
// rest, (IV) queued non-hubs fill the remaining freed slots, (V) rebuild
// the CSR(s) under the new ids.
// NOTE(review): partitionSz = num_nodes / numThreads assumes
// num_nodes >= numThreads — confirm behavior for tiny graphs.
static
CSRGraph<NodeID_, DestID_, invert> degreeCluster(
const CSRGraph<NodeID_, DestID_, invert> &g, bool outDegree, pvector<NodeID_>& new_ids, bool createOnlyDegList, bool createBothCSRs, int cutoffFactor) {
Timer t;
t.Start();
int numThreads = omp_get_max_threads();
if (g.directed() == true) {
/* Step I: identify number and position of hubs in each threads partition*/
const int PADDING = 64 / sizeof(NodeID_); // pad counters to cache lines
NodeID_* localOffsets = new NodeID_[numThreads * PADDING]();
NodeID_ partitionSz = g.num_nodes() / numThreads;
NodeID_ avgDegree = g.num_edges_directed() / g.num_nodes();
NodeID_ degreeCutoff = avgDegree * cutoffFactor;
#pragma omp parallel
{
int tid = omp_get_thread_num();
NodeID_ startID = partitionSz * tid;
NodeID_ stopID = partitionSz * (tid + 1);
if (tid == numThreads - 1) {
stopID = g.num_nodes();
}
for (NodeID_ n = startID; n < stopID; ++n) {
if (outDegree) {
if (g.out_degree(n) > degreeCutoff) {
++localOffsets[tid * PADDING];
new_ids[n] = 1; // mark as hub; real id assigned in Step II
}
}
else {
if (g.in_degree(n) > degreeCutoff) {
++localOffsets[tid * PADDING];
new_ids[n] = 1;
}
}
}
}
// Serial exclusive scan over per-thread hub counts -> per-thread bases.
NodeID_ sum(0);
for (int tid = 0; tid < numThreads; ++tid) {
NodeID_ origCount = localOffsets[tid * PADDING];
localOffsets[tid * PADDING] = sum;
sum += origCount;
}
/* Step II: assign remap for the hub vertices first */
#pragma omp parallel
{
NodeID_ localCtr(0);
int tid = omp_get_thread_num();
NodeID_ startID = partitionSz * tid;
NodeID_ stopID = partitionSz * (tid + 1);
if (tid == numThreads - 1) {
stopID = g.num_nodes();
}
for (NodeID_ n = startID; n < stopID; ++n) {
if (new_ids[n] != -1) {
new_ids[n] = localOffsets[tid * PADDING] + localCtr;
++localCtr;
}
}
}
delete[] localOffsets;
/* Step III: assigning remap for (easy) non hub vertices */
NodeID_ numHubs = sum;
SlidingQueue<NodeID_> queue(numHubs);
#pragma omp parallel
{
QueueBuffer<NodeID_> lqueue(queue, numHubs / numThreads);
#pragma omp for
for (NodeID_ n = numHubs; n < g.num_nodes(); ++n) {
if (new_ids[n] == -1) {
new_ids[n] = n; // slot free: keep own id
}
else {
NodeID_ remappedTo = new_ids[n];
if (new_ids[remappedTo] == -1) {
new_ids[remappedTo] = n; //Swap ids
}
else {
lqueue.push_back(n); // defer: both slots taken
}
}
}
lqueue.flush();
}
queue.slide_window();
/* Step IV: assigning remaps for remaining non hubs */
NodeID_ unassignedCtr = 0;
auto q_iter = queue.begin();
#pragma omp parallel for
for (NodeID_ n = 0; n < numHubs; ++n) {
if (new_ids[n] == -1) {
// claim the next queued vertex atomically
NodeID_ u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1));
new_ids[n] = u;
}
}
/* Step V: generate degree to build a new graph */
pvector<NodeID_> degrees(g.num_nodes());
pvector<NodeID_> inv_degrees(g.num_nodes());
if (outDegree == true) {
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++) {
degrees[new_ids[n]] = g.out_degree(n);
inv_degrees[new_ids[n]] = g.in_degree(n);
}
}
else {
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++) {
degrees[new_ids[n]] = g.in_degree(n);
inv_degrees[new_ids[n]] = g.out_degree(n);
}
}
/* Graph building phase */
pvector<SGOffset> offsets = ParallelPrefixSum(inv_degrees);
DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for schedule (dynamic, 1024)
for (NodeID_ u=0; u < g.num_nodes(); u++) {
if (outDegree == true) {
for (NodeID_ v : g.in_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
}
else {
for (NodeID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
}
}
DestID_* inv_neighs(nullptr);
DestID_** inv_index(nullptr);
if (createOnlyDegList == true || createBothCSRs == true) {
// making the inverse list (in-degrees in this case)
pvector<SGOffset> inv_offsets = ParallelPrefixSum(degrees);
inv_neighs = new DestID_[inv_offsets[g.num_nodes()]];
inv_index = CSRGraph<NodeID_, DestID_>::GenIndex(inv_offsets, inv_neighs);
if (createBothCSRs == true) {
#pragma omp parallel for schedule(dynamic, 1024)
for (NodeID_ u=0; u < g.num_nodes(); u++) {
if (outDegree == true) {
for (NodeID_ v : g.out_neigh(u))
inv_neighs[inv_offsets[new_ids[u]]++] = new_ids[v];
}
else {
for (NodeID_ v : g.in_neigh(u))
inv_neighs[inv_offsets[new_ids[u]]++] = new_ids[v];
}
}
}
}
t.Stop();
PrintTime("HubCluster time", t.Seconds());
if (outDegree == true) {
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), inv_index, inv_neighs, index, neighs);
}
else {
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs, inv_index, inv_neighs);
}
}
else {
/* Undirected graphs - no need to make separate lists for in and out degree */
/* Step I: identify number and position of hubs in each threads partition*/
const int PADDING = 64 / sizeof(NodeID_);
NodeID_* localOffsets = new NodeID_[numThreads * PADDING]();
NodeID_ partitionSz = g.num_nodes() / numThreads;
NodeID_ avgDegree = g.num_edges_directed() / g.num_nodes();
NodeID_ degreeCutoff = avgDegree * cutoffFactor;
#pragma omp parallel
{
int tid = omp_get_thread_num();
NodeID_ startID = partitionSz * tid;
NodeID_ stopID = partitionSz * (tid + 1);
if (tid == numThreads - 1) {
stopID = g.num_nodes();
}
for (NodeID_ n = startID; n < stopID; ++n) {
if (g.out_degree(n) > degreeCutoff) {
++localOffsets[tid * PADDING];
new_ids[n] = 1;
}
}
}
NodeID_ sum(0);
for (int tid = 0; tid < numThreads; ++tid) {
NodeID_ origCount = localOffsets[tid * PADDING];
localOffsets[tid * PADDING] = sum;
sum += origCount;
}
//std::cout << "[STAT] number of hubs = " << sum << std::endl;
/* Step II: assign remap for the hub vertices first */
#pragma omp parallel
{
NodeID_ localCtr(0);
int tid = omp_get_thread_num();
NodeID_ startID = partitionSz * tid;
NodeID_ stopID = partitionSz * (tid + 1);
if (tid == numThreads - 1) {
stopID = g.num_nodes();
}
for (NodeID_ n = startID; n < stopID; ++n) {
if (new_ids[n] != -1) {
new_ids[n] = localOffsets[tid * PADDING] + localCtr;
++localCtr;
}
}
}
delete[] localOffsets; //retire localOffsets
/* Step III: assigning remap for (easy) non hub vertices */
NodeID_ numHubs = sum;
SlidingQueue<NodeID_> queue(numHubs);
#pragma omp parallel
{
QueueBuffer<NodeID_> lqueue(queue, numHubs / numThreads);
#pragma omp for
for (NodeID_ n = numHubs; n < g.num_nodes(); ++n) {
if (new_ids[n] == -1) {
new_ids[n] = n;
}
else {
NodeID_ remappedTo = new_ids[n];
if (new_ids[remappedTo] == -1) {
new_ids[remappedTo] = n; //Swap ids
}
else {
lqueue.push_back(n);
}
}
}
lqueue.flush();
}
queue.slide_window();
/* Step IV: assigning remaps for remaining non hubs */
NodeID_ unassignedCtr = 0;
auto q_iter = queue.begin();
#pragma omp parallel for
for (NodeID_ n = 0; n < numHubs; ++n) {
if (new_ids[n] == -1) {
NodeID_ u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1));
new_ids[n] = u;
}
}
/* Step V: generate degree to build a new graph */
pvector<NodeID_> degrees(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++) {
degrees[new_ids[n]] = g.out_degree(n);
}
/* Graph building phase */
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for schedule (dynamic, 1024)
for (NodeID_ u=0; u < g.num_nodes(); u++) {
for (NodeID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
}
t.Stop();
PrintTime("HubCluster time", t.Seconds());
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
}
}
// Similar to the previous function but handles directed graphs,
// weighted graphs, and also does relabeling by user specified method
// Relabels the vertices of `g` so that hub vertices -- those whose selected
// degree exceeds cutoffFactor * (average directed degree) -- occupy the
// lowest new IDs, then rebuilds the CSR structure(s) under the new IDs.
// Unlike the unweighted variant above, this handles directed graphs and
// weighted edges (DestID_ carrying a weight), and can build the inverse
// CSR on demand.
//
// Parameters:
//   g                 - input graph (read only)
//   outDegree         - classify hubs by out-degree (true) or in-degree (false)
//   new_ids           - out-parameter mapping old ID -> new ID; caller must
//                       have initialized every entry to -1
//   createOnlyDegList - also build the inverse offset/degree structure
//   createBothCSRs    - additionally populate the inverse neighbor list
//   cutoffFactor      - hub threshold multiplier over the average degree
static
CSRGraph<NodeID_, DestID_, invert> degreeCluster_weighted(
    const CSRGraph<NodeID_, DestID_, invert> &g, bool outDegree, pvector<NodeID_> &new_ids, bool createOnlyDegList, bool createBothCSRs, int cutoffFactor) {
  Timer t;
  t.Start();
  int numThreads = omp_get_max_threads();
  if (g.directed() == true) {
    /* Step I: identify number and position of hubs in each threads partition*/
    // PADDING spaces the per-thread hub counters one cache line (64B) apart
    // to avoid false sharing.
    const int PADDING = 64 / sizeof(NodeID_);
    NodeID_* localOffsets = new NodeID_[numThreads * PADDING]();
    NodeID_ partitionSz = g.num_nodes() / numThreads;
    NodeID_ avgDegree = g.num_edges_directed() / g.num_nodes();
    NodeID_ degreeCutoff = avgDegree * cutoffFactor;
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      // Each thread scans a contiguous slice; the last thread absorbs the
      // division remainder.
      NodeID_ startID = partitionSz * tid;
      NodeID_ stopID = partitionSz * (tid + 1);
      if (tid == numThreads - 1) {
        stopID = g.num_nodes();
      }
      for (NodeID_ n = startID; n < stopID; ++n) {
        // Mark hubs with a placeholder value (1); non-hubs keep -1.
        if (outDegree) {
          if (g.out_degree(n) > degreeCutoff) {
            ++localOffsets[tid * PADDING];
            new_ids[n] = 1;
          }
        }
        else {
          if (g.in_degree(n) > degreeCutoff) {
            ++localOffsets[tid * PADDING];
            new_ids[n] = 1;
          }
        }
      }
    }
    // Serial exclusive prefix sum over the per-thread hub counts: afterwards
    // localOffsets[tid*PADDING] is the first new ID for that thread's hubs
    // and `sum` is the total hub count.
    NodeID_ sum(0);
    for (int tid = 0; tid < numThreads; ++tid) {
      NodeID_ origCount = localOffsets[tid * PADDING];
      localOffsets[tid * PADDING] = sum;
      sum += origCount;
    }
    //std::cout << "[STAT] number of hubs = " << sum << std::endl;
    /* Step II: assign remap for the hub vertices first */
    // Hubs receive new IDs in [0, numHubs), preserving their relative order
    // within each thread's slice.
    #pragma omp parallel
    {
      NodeID_ localCtr(0);
      int tid = omp_get_thread_num();
      NodeID_ startID = partitionSz * tid;
      NodeID_ stopID = partitionSz * (tid + 1);
      if (tid == numThreads - 1) {
        stopID = g.num_nodes();
      }
      for (NodeID_ n = startID; n < stopID; ++n) {
        if (new_ids[n] != -1) {
          new_ids[n] = localOffsets[tid * PADDING] + localCtr;
          ++localCtr;
        }
      }
    }
    delete[] localOffsets;
    /* Step III: assigning remap for (easy) non hub vertices */
    // A non-hub keeps its own ID if free, or swaps with the hub that took
    // it; contested IDs are queued for Step IV.
    NodeID_ numHubs = sum;
    SlidingQueue<NodeID_> queue(numHubs);
    #pragma omp parallel
    {
      QueueBuffer<NodeID_> lqueue(queue, numHubs / numThreads);
      #pragma omp for
      for (NodeID_ n = numHubs; n < g.num_nodes(); ++n) {
        if (new_ids[n] == -1) {
          new_ids[n] = n;
        }
        else {
          NodeID_ remappedTo = new_ids[n];
          if (new_ids[remappedTo] == -1) {
            new_ids[remappedTo] = n; //Swap ids
          }
          else {
            lqueue.push_back(n);
          }
        }
      }
      lqueue.flush();
    }
    queue.slide_window();
    /* Step IV: assigning remaps for remaining non hubs */
    // Hand the queued vertices out to the still-unassigned slots below
    // numHubs; the atomic fetch-add dispenses queue positions.
    NodeID_ unassignedCtr = 0;
    auto q_iter = queue.begin();
    #pragma omp parallel for
    for (NodeID_ n = 0; n < numHubs; ++n) {
      if (new_ids[n] == -1) {
        NodeID_ u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1));
        new_ids[n] = u;
      }
    }
    /* Step V: generate degree to build a new graph */
    // degrees[] holds the selected direction's degree, inv_degrees[] the
    // opposite one, both indexed by the NEW vertex IDs.
    pvector<NodeID_> degrees(g.num_nodes());
    pvector<NodeID_> inv_degrees(g.num_nodes());
    if (outDegree == true) {
      #pragma omp parallel for
      for (NodeID_ n=0; n < g.num_nodes(); n++) {
        degrees[new_ids[n]] = g.out_degree(n);
        inv_degrees[new_ids[n]] = g.in_degree(n);
      }
    }
    else {
      #pragma omp parallel for
      for (NodeID_ n=0; n < g.num_nodes(); n++) {
        degrees[new_ids[n]] = g.in_degree(n);
        inv_degrees[new_ids[n]] = g.out_degree(n);
      }
    }
    /* Graph building phase */
    // The first CSR is sized by inv_degrees and filled from the opposite
    // direction's neighbor lists; edge weights are preserved while both
    // endpoints are translated to new IDs.
    pvector<SGOffset> offsets = ParallelPrefixSum(inv_degrees);
    DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
    DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
    #pragma omp parallel for schedule(dynamic, 1024)
    for (NodeID_ u=0; u < g.num_nodes(); u++) {
      if (outDegree) {
        for (auto v : g.in_neigh(u)) {
          auto oldWeight = v.w;
          auto newID = new_ids[(NodeID_) v.v];
          DestID_ newV (newID, oldWeight);
          neighs[offsets[new_ids[u]]++] = newV;
        }
      }
      else {
        for (auto v : g.out_neigh(u)) {
          auto oldWeight = v.w;
          auto newID = new_ids[(NodeID_) v.v];
          DestID_ newV (newID, oldWeight);
          neighs[offsets[new_ids[u]]++] = newV;
        }
      }
    }
    DestID_* inv_neighs(nullptr);
    DestID_** inv_index(nullptr);
    if (createBothCSRs == true || createOnlyDegList == true) {
      // making the inverse list (in-degrees in this case)
      pvector<SGOffset> inv_offsets = ParallelPrefixSum(degrees);
      inv_neighs = new DestID_[inv_offsets[g.num_nodes()]];
      inv_index = CSRGraph<NodeID_, DestID_>::GenIndex(inv_offsets, inv_neighs);
      if (createBothCSRs == true) {
        #pragma omp parallel for schedule(dynamic, 1024)
        for (NodeID_ u=0; u < g.num_nodes(); u++) {
          if (outDegree == true) {
            for (auto v : g.out_neigh(u)) {
              auto oldWeight = v.w;
              auto newID = new_ids[(NodeID_) v.v];
              DestID_ newV (newID, oldWeight);
              inv_neighs[inv_offsets[new_ids[u]]++] = newV;
            }
          }
          else {
            for (auto v : g.in_neigh(u)) {
              auto oldWeight = v.w;
              auto newID = new_ids[(NodeID_) v.v];
              DestID_ newV (newID, oldWeight);
              inv_neighs[inv_offsets[new_ids[u]]++] = newV;
            }
          }
        }
      }
    }
    t.Stop();
    PrintTime("HubCluster time", t.Seconds());
    // Constructor argument order differs so the returned graph's out-CSR is
    // always the one that was built from out-edges.
    if (outDegree == true) {
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), inv_index, inv_neighs, index, neighs);
    }
    else {
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs, inv_index, inv_neighs);
    }
  }
  else {
    /* Undirected graphs - no need to make separate lists for in and out degree */
    /* Step I: identify number and position of hubs in each threads partition*/
    const int PADDING = 64 / sizeof(NodeID_);
    NodeID_* localOffsets = new NodeID_[numThreads * PADDING]();
    NodeID_ partitionSz = g.num_nodes() / numThreads;
    NodeID_ avgDegree = g.num_edges_directed() / g.num_nodes();
    NodeID_ degreeCutoff = avgDegree * cutoffFactor;
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      NodeID_ startID = partitionSz * tid;
      NodeID_ stopID = partitionSz * (tid + 1);
      if (tid == numThreads - 1) {
        stopID = g.num_nodes();
      }
      for (NodeID_ n = startID; n < stopID; ++n) {
        if (g.out_degree(n) > degreeCutoff) {
          ++localOffsets[tid * PADDING];
          new_ids[n] = 1;
        }
      }
    }
    // Exclusive prefix sum of per-thread hub counts (see directed branch).
    NodeID_ sum(0);
    for (int tid = 0; tid < numThreads; ++tid) {
      NodeID_ origCount = localOffsets[tid * PADDING];
      localOffsets[tid * PADDING] = sum;
      sum += origCount;
    }
    //std::cout << "[STAT] number of hubs = " << sum << std::endl;
    /* Step II: assign remap for the hub vertices first */
    #pragma omp parallel
    {
      NodeID_ localCtr(0);
      int tid = omp_get_thread_num();
      NodeID_ startID = partitionSz * tid;
      NodeID_ stopID = partitionSz * (tid + 1);
      if (tid == numThreads - 1) {
        stopID = g.num_nodes();
      }
      for (NodeID_ n = startID; n < stopID; ++n) {
        if (new_ids[n] != -1) {
          new_ids[n] = localOffsets[tid * PADDING] + localCtr;
          ++localCtr;
        }
      }
    }
    delete[] localOffsets; //retire localOffsets
    /* Step III: assigning remap for (easy) non hub vertices */
    NodeID_ numHubs = sum;
    SlidingQueue<NodeID_> queue(numHubs);
    #pragma omp parallel
    {
      QueueBuffer<NodeID_> lqueue(queue, numHubs / numThreads);
      #pragma omp for
      for (NodeID_ n = numHubs; n < g.num_nodes(); ++n) {
        if (new_ids[n] == -1) {
          new_ids[n] = n;
        }
        else {
          NodeID_ remappedTo = new_ids[n];
          if (new_ids[remappedTo] == -1) {
            new_ids[remappedTo] = n; //Swap ids
          }
          else {
            lqueue.push_back(n);
          }
        }
      }
      lqueue.flush();
    }
    queue.slide_window();
    /* Step IV: assigning remaps for remaining non hubs */
    NodeID_ unassignedCtr = 0;
    auto q_iter = queue.begin();
    #pragma omp parallel for
    for (NodeID_ n = 0; n < numHubs; ++n) {
      if (new_ids[n] == -1) {
        NodeID_ u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1));
        new_ids[n] = u;
      }
    }
    /* Step V: generate degree to build a new graph */
    pvector<NodeID_> degrees(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      degrees[new_ids[n]] = g.out_degree(n);
    }
    /* Graph building phase */
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
    DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
    #pragma omp parallel for schedule (dynamic, 1024)
    for (NodeID_ u=0; u < g.num_nodes(); u++) {
      // Copy each (weighted) edge under the new vertex numbering.
      for (auto v : g.out_neigh(u)) {
        auto oldWeight = v.w;
        auto newID = new_ids[(NodeID_) v.v];
        DestID_ newV (newID, oldWeight);
        neighs[offsets[new_ids[u]]++] = newV;
      }
    }
    t.Stop();
    PrintTime("HubCluster time", t.Seconds());
    return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
  }
}
};
#endif // BUILDER_H_
|
erodr.c | #include <time.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "vector.h"
#include "io.h"
#include "image.h"
#include "params.h"
#include "util.h"
/*
 * Particle type: one unit of water moving across the heightmap.
 */
typedef struct particle {
    vec2 pos;          /* current (fractional) grid position */
    vec2 dir;          /* normalized movement direction */
    double vel;        /* current speed */
    double sediment;   /* amount of sediment currently carried */
    double water;      /* remaining water volume (decays by evaporation) */
} particle;
/*
 * gradient & height tuple: result of one interpolated heightmap lookup.
 */
typedef struct hg_tuple {
    vec2 gradient;     /* interpolated surface gradient */
    double height;     /* interpolated surface height */
} hg_tuple;
/*
 * Bilinearly interpolates the double-valued heightmap `map` at the
 * fractional position `pos`.  Reads the 2x2 cell whose upper-left corner
 * is (floor(pos.x), floor(pos.y)); callers must keep pos strictly inside
 * the map so the +1 reads stay in bounds.
 */
double bil_interpolate_map_double(const image *map, vec2 pos) {
    const double *cells = (const double *) map->buffer;
    const int cx = (int)pos.x;
    const int cy = (int)pos.y;
    const double fx = pos.x - cx;   /* horizontal blend factor */
    const double fy = pos.y - cy;   /* vertical blend factor */
    const int row = cy * map->width + cx;
    /* blend top/bottom samples vertically first, then horizontally */
    const double left  = (1 - fy) * cells[row] + fy * cells[row + map->width];
    const double right = (1 - fy) * cells[row + 1] + fy * cells[row + map->width + 1];
    return (1 - fx) * left + fx * right;
}
/*
 * Deposits `amount` of sediment at position `pos` in heightmap `hmap`.
 * The amount is split across the four surrounding gridpoints by bilinear
 * (area) weights, so only the immediate neighbours of `pos` are touched.
 */
void deposit(image *hmap, vec2 pos, double amount) {
    double *cells = (double *) hmap->buffer;
    const int cx = (int)pos.x;
    const int cy = (int)pos.y;
    const double fx = pos.x - cx;
    const double fy = pos.y - cy;
    const int row = cy * hmap->width + cx;
    cells[row]                   += amount * (1 - fx) * (1 - fy);
    cells[row + 1]               += amount * fx * (1 - fy);
    cells[row + hmap->width]     += amount * (1 - fx) * fy;
    cells[row + hmap->width + 1] += amount * fx * fy;
}
/*
 * Erodes heightmap `hmap` around position `pos` by a total of `amount`.
 * The erosion is spread over a disc of the given `radius` with weights
 * falling off linearly with distance from `pos`.  A radius below 1
 * degenerates to a bilinear 4-point (negative) deposit.
 */
void erode(image *hmap, vec2 pos, double amount, int radius) {
    double *hmap_buffer = (double *) hmap->buffer;
    if(radius < 1){
        /* kernel would cover a single cell: reuse bilinear deposition */
        deposit(hmap, pos, -amount);
        return;
    }
    /* kernel window, clipped against the map borders */
    int x0 = (int)pos.x - radius;
    int y0 = (int)pos.y - radius;
    int x_start = max(0, x0);
    int y_start = max(0, y0);
    int x_end = min(hmap->width, x0+2*radius+1);
    int y_end = min(hmap->height, y0+2*radius+1);
    /* construct erosion/deposition kernel: weight = max(0, radius - dist) */
    double kernel[2*radius + 1][2*radius + 1];
    double kernel_sum = 0;
    for(int y = y_start; y < y_end; y++) {
        for(int x = x_start; x < x_end; x++) {
            double d_x = x - pos.x;
            double d_y = y - pos.y;
            double distance = sqrt(d_x*d_x + d_y*d_y);
            double w = fmax(0, radius - distance);
            kernel_sum += w;
            kernel[y-y0][x-x0] = w;
        }
    }
    /* BUGFIX: guard against a degenerate all-zero kernel (e.g. the window
     * was clipped so far that no gridpoint lies within `radius`); the old
     * code divided by zero here and sprayed NaNs into the heightmap. */
    if(kernel_sum <= 0)
        return;
    /* normalize weights and apply changes on heightmap */
    for(int y = y_start; y < y_end; y++) {
        for(int x = x_start; x < x_end; x++) {
            kernel[y-y0][x-x0] /= kernel_sum;
            hmap_buffer[y*hmap->width + x] -= amount * kernel[y-y0][x-x0];
        }
    }
}
/*
 * Returns the forward-difference gradient at integer gridpoint (x, y) of
 * heightmap `hmap`.  On the last column/row the difference is taken
 * against the cell itself, so that gradient component becomes 0 instead
 * of reading out of bounds.
 */
vec2 gradient_at(image *hmap, int x, int y) {
    const double *cells = (const double *) hmap->buffer;
    const int here = y * hmap->width + x;
    int step_x = 1;
    if (x > hmap->width - 2)
        step_x = 0;
    int step_y = hmap->width;
    if (y > hmap->height - 2)
        step_y = 0;
    vec2 grad;
    grad.x = cells[here + step_x] - cells[here];
    grad.y = cells[here + step_y] - cells[here];
    return grad;
}
/*
 * Returns the bilinearly interpolated gradient and height at fractional
 * position `pos` on heightmap `hmap`.
 */
hg_tuple height_gradient_at(image *hmap, vec2 pos) {
    const int cx = (int)pos.x;
    const int cy = (int)pos.y;
    const double fx = pos.x - cx;
    const double fy = pos.y - cy;
    /* gradients at the four surrounding grid corners */
    const vec2 g00 = gradient_at(hmap, cx, cy);
    const vec2 g10 = gradient_at(hmap, cx + 1, cy);
    const vec2 g01 = gradient_at(hmap, cx, cy + 1);
    const vec2 g11 = gradient_at(hmap, cx + 1, cy + 1);
    /* bilinear blend: vertical first, then horizontal */
    const vec2 left  = add(scalar_mul(1 - fy, g00), scalar_mul(fy, g01));
    const vec2 right = add(scalar_mul(1 - fy, g10), scalar_mul(fy, g11));
    hg_tuple out;
    out.gradient = add(scalar_mul(1 - fx, left), scalar_mul(fx, right));
    out.height = bil_interpolate_map_double(hmap, pos);
    return out;
}
/*
 * Runs the hydraulic erosion simulation: params->n water particles are
 * spawned at random positions and moved for at most params->ttl steps,
 * eroding or depositing sediment along their paths.
 *
 * NOTE(review): concurrent particles may erode/deposit into the same
 * heightmap cells; those read-modify-write updates are unsynchronized
 * under OpenMP, so multi-threaded results are approximate and
 * non-deterministic -- confirm this is acceptable for this tool.
 */
static double rand01(unsigned *state) {
    /* Small per-particle LCG (Numerical Recipes constants).  rand() is
     * not required to be reentrant, so it must not be shared across
     * OpenMP threads. */
    *state = *state * 1664525u + 1013904223u;
    return (double)*state / 4294967295.0;
}

void simulate_particles(image *hmap, sim_params *params) {
    const unsigned base_seed = (unsigned) time(NULL);
    // simulate each particle
    #pragma omp parallel for
    for(int i = 0; i < params->n; i++) {
        if(!((i+1) % 10000))
            printf("Particles simulated: %d\n", i+1);
        /* per-particle RNG state: fixes the data race of calling rand()
         * from multiple threads */
        unsigned rng = base_seed ^ ((unsigned)i * 2654435761u);
        // spawn particle at a random position inside the map.
        particle p;
        /* BUGFIX: the y coordinate used to be drawn from [0, width-1];
         * on non-square maps that could spawn particles outside (or only
         * partially cover) the map.  Use height for y. */
        p.pos = (vec2){
            rand01(&rng) * ((double)hmap->width - 1.0),
            rand01(&rng) * ((double)hmap->height - 1.0)
        };
        p.dir = (vec2){0, 0};
        p.vel = 0;
        p.sediment = 0;
        p.water = 1;
        for(int j = 0; j < params->ttl; j++) {
            // interpolate gradient g and height h_old at p's position.
            vec2 pos_old = p.pos;
            hg_tuple hg = height_gradient_at(hmap, pos_old);
            vec2 g = hg.gradient;
            double h_old = hg.height;
            // new direction: inertia-weighted blend of old direction and
            // the downhill (negative gradient) direction.
            p.dir = sub(
                    scalar_mul(params->p_enertia, p.dir),
                    scalar_mul(1 - params->p_enertia, g)
            );
            normalize(&p.dir);
            // move one unit step
            p.pos = add(p.pos, p.dir);
            // stop when the particle leaves the map
            vec2 pos_new = p.pos;
            if(pos_new.x > (hmap->width-1) || pos_new.x < 0 ||
                    pos_new.y > (hmap->height-1) || pos_new.y < 0)
                break;
            // height at the new position
            double h_new = bil_interpolate_map_double(hmap, pos_new);
            double h_diff = h_new - h_old;
            // sediment capacity
            double c = fmax(-h_diff, params->p_min_slope) * p.vel * p.water * params->p_capacity;
            // deposit when moving uphill or over capacity, otherwise erode
            if(h_diff > 0 || p.sediment > c) {
                double to_deposit = (h_diff > 0) ?
                        fmin(p.sediment, h_diff) :
                        (p.sediment - c) * params->p_deposition;
                p.sediment -= to_deposit;
                deposit(hmap, pos_old, to_deposit);
            } else {
                double to_erode = fmin((c - p.sediment) * params->p_erosion, -h_diff);
                p.sediment += to_erode;
                erode(hmap, pos_old, to_erode, params->p_radius);
            }
            // update `vel` (gravity) and `water` (evaporation)
            // NOTE(review): vel^2 + h_diff*p_gravity can go negative on a
            // steep downhill step, making sqrt() return NaN -- confirm
            // params keep this bounded.
            p.vel = sqrt(p.vel*p.vel + h_diff*params->p_gravity);
            p.water *= (1 - params->p_evaporation);
        }
    }
}
/*
 * Entry point: parse arguments, load the input PGM heightmap, run the
 * erosion simulation, clamp if needed, and write the result.
 */
int main(int argc, char *argv[]) {
    sim_params params = DEFAULT_PARAM;
    image heightmap;
    char inputpath[FILEPATH_MAXLEN];
    char outputpath[FILEPATH_MAXLEN];
    bool ascii_out = false;
    strcpy(outputpath, OUTPUTFILEPATH_DEFAULT);
    /* parse command line; on failure print usage and exit */
    if (parse_args(argc, argv, inputpath, outputpath, &params, &ascii_out))
        exit_with_info(1);
    /* read the PGM heightmap */
    if (load_pgm(inputpath, &heightmap))
        exit_with_info(1);
    /* run hydraulic erosion in place */
    simulate_particles(&heightmap, &params);
    /* warn if values had to be clipped back into the valid range */
    if (maybe_clamp(&heightmap))
        print_clipping_warning();
    /* save result and release resources */
    save_pgm(outputpath, &heightmap, ascii_out);
    release_image(&heightmap);
    return 0;
}
|
openmp_utils.h | #pragma once
// #include <execution>
#include <hydra/common.h>
namespace hydra::utils {
template <typename T>
std::vector<T> combine_vectors(std::vector<std::vector<T>> const &vec_of_vec) {
idx_t size = 0;
for (auto const &vec : vec_of_vec) {
size += vec.size();
}
std::vector<T> total_vec(size);
idx_t offset = 0;
for (auto const &vec : vec_of_vec) {
#pragma omp parallel for
for (idx_t i = 0; i < vec.size(); ++i) {
total_vec[offset + i] = vec[i];
}
offset += vec.size();
}
return total_vec;
}
template <typename T>
std::vector<T>
combine_vectors_copy(std::vector<std::vector<T>> const &vec_of_vec) {
idx_t size = 0;
for (auto const &vec : vec_of_vec) {
size += vec.size();
}
std::vector<T> total_vec(size);
idx_t offset = 0;
for (auto const &vec : vec_of_vec) {
std::copy(vec.begin(), vec.end(), total_vec.begin() + offset);
offset += vec.size();
}
return total_vec;
}
} // namespace hydra::utils
|
fwi_core.c | /*
* =============================================================================
* Copyright (c) 2016-2018, Barcelona Supercomputing Center (BSC)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* =============================================================================
*/
#include "fwi/fwi_core.h"
#include "fwi/fwi_sched.h"
/*
* In order to generate a source for injection,
* /system/support/bscgeo/src/wavelet.c
* functions can be used.
*/
/*
 * Runs one propagation kernel for a single shot: loads the shot's
 * parameters and velocity model, performs the requested propagation
 * (RTM forward+backward, or forward modelling), and -- for RTM -- stores
 * the resulting gradient and preconditioner fields on disk.
 */
void kernel( propagator_t propagator, real waveletFreq, int shotid, char* outputfolder, char* shotfolder)
{
#if defined(USE_MPI)
    /* find ourselves into the MPI space */
    int mpi_rank, mpi_size;
    MPI_Comm_rank( MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size( MPI_COMM_WORLD, &mpi_size);
#endif /* USE_MPI */

    /* local variables */
    int stacki;
    double start_t, end_t;
    real dt,dz,dx,dy;
    integer dimmz, dimmx, dimmy, MaxYPlanesPerWorker, forw_steps, back_steps;

    /* shot geometry and time-stepping parameters stored by the scheduler */
    load_shot_parameters( shotid, &stacki, &dt, &forw_steps, &back_steps,
                          &dz, &dx, &dy,
                          &dimmz, &dimmx, &dimmy,
                          &MaxYPlanesPerWorker,
                          outputfolder, waveletFreq );

#if defined(USE_MPI)
    /* aux variables, just to make it more readable */
    const int FIRSTRANK = 0;
    const int LASTRANK  = mpi_size - 1;

    /* Compute the integration limits in order to load the correct slice from the input
     * velocity model. These are not the limits for the wave propagator! (they are local,
     * i.e. starts at zero!) */
    const integer y0 = (mpi_rank == FIRSTRANK) ? 0 : (MaxYPlanesPerWorker * mpi_rank) - HALO;
    const integer yf = (mpi_rank == LASTRANK ) ? dimmy : y0 + MaxYPlanesPerWorker;
    const integer edimmy = (yf - y0);
#else
    const integer y0 = 0;
    const integer yf = dimmy;
    const integer edimmy = dimmy;
#endif /* USE_MPI */

    /* Compute integration limits for the wave propagator.
     * It assumes that the volume is local, so the indices start at zero */
    const integer nz0 = 0;
    const integer ny0 = 0;
    const integer nx0 = 0;
    const integer nzf = dimmz;
    const integer nxf = dimmx;
    const integer nyf = edimmy;

    const integer numberOfCells = dimmz * dimmx * edimmy;

    real    *rho;
    v_t     v;
    s_t     s;
    coeff_t coeffs;

    print_debug("The length of local arrays is " I " cells zxy[%d][%d][%d]", numberOfCells, nzf, nxf, nyf);

    /* allocate shot memory */
    alloc_memory_shot  ( dimmz, dimmx, (nyf - ny0), &coeffs, &s, &v, &rho);

    /* load initial model from a binary file */
    load_local_velocity_model ( waveletFreq, dimmz, dimmx, y0, yf, &coeffs, &s, &v, rho);

    /* Allocate memory for IO buffer */
    real* io_buffer = (real*) __malloc( ALIGN_REAL, numberOfCells * sizeof(real) * WRITTEN_FIELDS );

    /* inspects every array positions for leaks. Enabled when DEBUG flag is defined */
    check_memory_shot  ( dimmz, dimmx, (nyf - ny0), &coeffs, &s, &v, rho);

    /* Perform forward, backward or test propagations */
    switch( propagator )
    {
      case( RTM_KERNEL ):
      {
        start_t = dtime();
        propagate_shot ( FORWARD,
                         v, s, coeffs, rho,
                         forw_steps, back_steps -1,
                         dt,dz,dx,dy,
                         nz0, nzf, nx0, nxf, ny0, nyf,
                         stacki,
                         shotfolder,
                         io_buffer,
                         dimmz, dimmx, (nyf - ny0));
        end_t = dtime();
        print_stats("Forward propagation finished in %lf seconds", end_t - start_t );

        start_t = dtime();
        propagate_shot ( BACKWARD,
                         v, s, coeffs, rho,
                         forw_steps, back_steps -1,
                         dt,dz,dx,dy,
                         nz0, nzf, nx0, nxf, ny0, nyf,
                         stacki,
                         shotfolder,
                         io_buffer,
                         dimmz, dimmx, (nyf - ny0));
        end_t = dtime();
        print_stats("Backward propagation finished in %lf seconds", end_t - start_t );

#if defined(DO_NOT_PERFORM_IO)
        print_info("Warning: we are not creating gradient nor preconditioner "
                   "fields, because IO is not enabled for this execution" );
#else
#if defined(USE_MPI)
        if ( mpi_rank == 0 )
#endif /* USE_MPI */
        {
            char fnameGradient[300];
            char fnamePrecond[300];
            sprintf( fnameGradient, "%s/gradient_%05d.dat", shotfolder, shotid );
            sprintf( fnamePrecond , "%s/precond_%05d.dat" , shotfolder, shotid );

            FILE* fgradient = safe_fopen( fnameGradient, "wb", __FILE__, __LINE__ );
            FILE* fprecond  = safe_fopen( fnamePrecond , "wb", __FILE__, __LINE__ );

            /* BUGFIX: the two log messages below were swapped (the gradient
             * file was announced as "preconditioner" and vice versa), and the
             * element count used a literal 12 instead of WRITTEN_FIELDS --
             * the constant the buffer is allocated with and that
             * gather_shots() reads back. */
            print_info("Storing local gradient field in %s", fnameGradient );
            safe_fwrite( io_buffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, fgradient, __FILE__, __LINE__ );

            print_info("Storing local preconditioner field in %s", fnamePrecond);
            safe_fwrite( io_buffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, fprecond , __FILE__, __LINE__ );

            safe_fclose( fnameGradient, fgradient, __FILE__, __LINE__ );
            safe_fclose( fnamePrecond , fprecond , __FILE__, __LINE__ );
        }
#endif /* end DO_NOT_PERFORM_IO */
        break;
      }
      case( FM_KERNEL ):
      {
        start_t = dtime();
        /* NOTE(review): this call passes dimmy where the RTM path passes
         * (nyf - ny0); under MPI those differ -- confirm intended. */
        propagate_shot ( FWMODEL,
                         v, s, coeffs, rho,
                         forw_steps, back_steps -1,
                         dt,dz,dx,dy,
                         nz0, nzf, nx0, nxf, ny0, nyf,
                         stacki,
                         shotfolder,
                         io_buffer,
                         dimmz, dimmx, dimmy);
        end_t = dtime();
        print_stats("Forward Modelling finished in %lf seconds", end_t - start_t );
        break;
      }
      default:
      {
        print_error("Invalid propagation identifier");
        abort();
      }
    } /* end case */

    /* release the memory allocated for this shot */
    free_memory_shot  ( &coeffs, &s, &v, &rho);
    __free( io_buffer );
}
/*
 * Accumulates the per-shot preconditioner and gradient fields written by
 * kernel() into the global "Preconditioner.<freq>" and "Gradient.<freq>"
 * files for the given frequency.  No-op (warning only) when IO is
 * disabled at compile time.
 */
void gather_shots( char* outputfolder, const real waveletFreq, const int nshots, const int numberOfCells )
{
#if defined(DO_NOT_PERFORM_IO)
    print_info("Warning: we are not gathering the results because the IO is disabled "
               "for this execution");
#else
    /* --------- GLOBAL PRECONDITIONER ACCUMULATION --------- */
    print_info("Gathering local preconditioner fields");

    /* variables for timing */
    double start_t, end_t;

    /* buffers to read and accumulate the fields */
    real* sumbuffer  = (real*) __malloc( ALIGN_REAL, numberOfCells * sizeof(real) * WRITTEN_FIELDS );
    real* readbuffer = (real*) __malloc( ALIGN_REAL, numberOfCells * sizeof(real) * WRITTEN_FIELDS );

    start_t = dtime();

    /* set buffer positions to zero */
    memset ( sumbuffer, 0, numberOfCells * sizeof(real) * WRITTEN_FIELDS );

    for( int shot=0; shot < nshots; shot++)
    {
        char readfilename[300];
        sprintf( readfilename, "%s/shot.%2.1f.%05d/precond_%05d.dat",
                 outputfolder, waveletFreq, shot, shot);

        print_info("Reading preconditioner file '%s'", readfilename );

        FILE* freadfile = safe_fopen( readfilename, "rb", __FILE__, __LINE__ );
        safe_fread ( readbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, freadfile, __FILE__, __LINE__ );

#if defined(_OPENMP)
        #pragma omp parallel for
#endif
#if defined(__INTEL_COMPILER)
        #pragma simd
#endif
        for( int i = 0; i < numberOfCells * WRITTEN_FIELDS; i++)
            sumbuffer[i] += readbuffer[i];

        fclose (freadfile);
    }

    char precondfilename[300];
    sprintf( precondfilename, "%s/Preconditioner.%2.1f", outputfolder, waveletFreq );
    FILE* precondfile = safe_fopen( precondfilename, "wb", __FILE__, __LINE__ );
    safe_fwrite ( sumbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, precondfile, __FILE__, __LINE__ );
    safe_fclose( precondfilename, precondfile, __FILE__, __LINE__ );

    end_t = dtime();

    print_stats("Gatering process for preconditioner %s (freq %2.1f) "
                "completed in: %lf seconds",
                precondfilename, waveletFreq, end_t - start_t );

    /* --------- GLOBAL GRADIENT ACCUMULATION --------- */
    print_info("Gathering local gradient fields");

    start_t = dtime();

    /* set buffer positions to zero */
    memset ( sumbuffer, 0, numberOfCells * sizeof(real) * WRITTEN_FIELDS );

    for( int shot=0; shot < nshots; shot++)
    {
        char readfilename[300];
        sprintf( readfilename, "%s/shot.%2.1f.%05d/gradient_%05d.dat",
                 outputfolder, waveletFreq, shot, shot);

        print_info("Reading gradient file %s", readfilename );

        FILE* freadfile = safe_fopen( readfilename, "rb", __FILE__, __LINE__ );
        safe_fread ( readbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, freadfile, __FILE__, __LINE__ );

#if defined(_OPENMP)
        #pragma omp parallel for
#endif
#ifdef __INTEL_COMPILER
        #pragma simd
#endif
        for( int i = 0; i < numberOfCells * WRITTEN_FIELDS; i++)
            sumbuffer[i] += readbuffer[i];

        fclose (freadfile);
    }

    char gradientfilename[300];
    sprintf( gradientfilename, "%s/Gradient.%2.1f", outputfolder, waveletFreq );
    FILE* gradientfile = safe_fopen( gradientfilename, "wb", __FILE__, __LINE__ );
    safe_fwrite ( sumbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, gradientfile, __FILE__, __LINE__ );
    safe_fclose( gradientfilename, gradientfile, __FILE__, __LINE__ );

    end_t = dtime();

    /* BUGFIX: this report used to print `precondfilename` again instead of
     * the gradient file it actually wrote. */
    print_stats("Gatering process for gradient %s (freq %2.1f) "
                "completed in: %lf seconds",
                gradientfilename, waveletFreq, end_t - start_t );

    __free( sumbuffer);
    __free( readbuffer);
#endif /* end DO_NOT_PERFORM_IO */
}
/*
 * Drives the full FWI simulation: for every frequency in the schedule
 * file (argv[1]), runs the gradient (RTM) and test (forward modelling)
 * shot loops, creating the per-shot work folders and parameter files.
 * Returns 0 on completion.
 */
int execute_simulation( int argc, char* argv[] )
{
#if defined(USE_MPI)
    MPI_Init ( &argc, &argv );
    int mpi_rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &mpi_rank);
#elif !defined(USE_MPI) && defined(_OPENACC)
    //TODO: fix name
    int mpi_rank = 0;
#endif

#if defined(_OPENACC)
    /* bind this rank to a GPU (round-robin over available devices) */
    acc_init(acc_device_default);
    int gpuid = mpi_rank % acc_get_num_devices( acc_device_default );
    acc_set_device_num( gpuid, acc_device_default );
    fprintf(stdout, "MPI rank %d with GPU %d (%d)\n",
            mpi_rank, acc_get_device_num(acc_device_default), acc_get_num_devices(acc_device_default));
#endif /*_OPENACC*/

    /* Load parameters from schedule file */
    schedule_t s = load_schedule(argv[1]);

    for(int i=0; i<s.nfreqs; i++)
    {
        /* Process one frequency at a time */
        real waveletFreq = s.freq[i];
        integer stacki = s.stacki[i];
        real dt = s.dt[i];
        integer forw_steps = s.forws[i];
        integer back_steps = s.backs[i];
        real dx = s.dx[i];
        real dy = s.dy[i];
        real dz = s.dz[i];
        integer dimmz = s.dimmz[i];
        integer dimmx = s.dimmx[i];
        integer dimmy = s.dimmy[i];
        //integer nworkers = s.nworkers[i];
        integer MaxYPlanesPerWorker = s.ppd[i];

        print_info("\n------ Computing %d-th frequency (%.2fHz). ------\n", i, waveletFreq);

        /* BUGFIX: the cell count used dimmx twice (dimmz * dimmx * dimmx);
         * the third factor must be the y dimension. */
        const integer numberOfCells = dimmz * dimmx * dimmy;
        const size_t VolumeMemory = numberOfCells * sizeof(real) * 58;

        print_stats("Local domain size for freq %f [%d][%d][%d] is %lu bytes (%lf GB)",
                    waveletFreq, dimmz, dimmx, dimmy, VolumeMemory, TOGB(VolumeMemory) );

        for(int grad=0; grad<s.ngrads; grad++) /* backward iteration */
        {
            print_info("Processing %d-gradient iteration", grad);

            for(int shot=0; shot<s.nshots; shot++)
            {
                char shotfolder[512];
                sprintf(shotfolder, "%s/shot.%2.2fHz.%03d", s.outputfolder, waveletFreq, shot);
#if defined(USE_MPI)
                if ( mpi_rank == 0 )
#endif
                {
                    create_folder( shotfolder );
                    store_shot_parameters( shot, &stacki, &dt, &forw_steps, &back_steps,
                                           &dz, &dx, &dy,
                                           &dimmz, &dimmx, &dimmy,
                                           &MaxYPlanesPerWorker,
                                           s.outputfolder, waveletFreq );
                }
#if defined(USE_MPI)
                MPI_Barrier( MPI_COMM_WORLD );
#endif
                kernel( RTM_KERNEL, waveletFreq, shot, s.outputfolder, shotfolder);
                print_info("\tGradient loop processed for %d-th shot", shot);
                //update_shot()
            }
            /* NOTE(review): the gather step below is disabled; its folder
             * pattern ("shot.%2.1f.%05d") also no longer matches the
             * "shot.%2.2fHz.%03d" folders created above -- reconcile before
             * re-enabling. */
            //#if defined(USE_MPI)
            //    MPI_Barrier( MPI_COMM_WORLD );
            //
            //    if ( mpi_rank == 0 ) {
            //        gather_shots( outputfolder, waveletFreq, nshots, numberOfCells );
            //    }
            //
            //    MPI_Barrier( MPI_COMM_WORLD );
            //#else
            //    gather_shots( s.outputfolder, waveletFreq, s.nshots, numberOfCells );
            //#endif

            for(int test=0; test<s.ntests; test++)
            {
                print_info("\tProcessing %d-th test iteration", test);

                for(int shot=0; shot<s.nshots; shot++)
                {
                    char shotfolder[512];
                    sprintf(shotfolder, "%s/test.%05d.shot.%2.2fHz.%03d",
                            s.outputfolder, test, waveletFreq, shot);
#if defined(USE_MPI)
                    if ( mpi_rank == 0)
#endif
                    {
                        create_folder( shotfolder );
                        store_shot_parameters( shot, &stacki, &dt, &forw_steps, &back_steps,
                                               &dz, &dx, &dy,
                                               &dimmz, &dimmx, &dimmy,
                                               &MaxYPlanesPerWorker,
                                               s.outputfolder, waveletFreq );
                    }
#if defined(USE_MPI)
                    MPI_Barrier( MPI_COMM_WORLD );
#endif
                    kernel( FM_KERNEL , waveletFreq, shot, s.outputfolder, shotfolder);
                    print_info("\t\tTest loop processed for the %d-th shot", shot);
                }
            } /* end of test loop */
        } /* end of gradient loop */
    } /* end of frequency loop */

#if defined(USE_MPI)
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
#endif

    return 0;
}
|
clang-gdb1.c | /* simple OMP offload kernel from Jeff Sandoval 9/22/2020 */
#include <omp.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#ifdef USE_MPI
#include "mpi.h"
#include <unistd.h>
#endif
/* OpenMP-offload smoke test: repeatedly updates a gate-variable array on
 * the device through Horner polynomial evaluations, then verifies both
 * that a GPU was used and that the computed checksum matches the
 * expected value for the default iteration count.
 * argv[1] = startup delay in seconds (MPI debug-attach window),
 * argv[2] = number of outer iterations. */
int main(int argc, char* argv[]) {
    int delay = 20;     /* seconds to sleep after MPI init (debugger-attach window) */
    int iters = 1000;   /* outer time steps; checksum only validated for 1000 */
    if (argc > 1) {
        delay = atoi(argv[1]);
    }
    if (argc > 2) {
        iters = atoi(argv[2]);
    }
#ifdef USE_MPI
    int ierr = MPI_Init(&argc, &argv);
    if (ierr != MPI_SUCCESS) {
        fprintf(stderr,"Error: MPI_init() failed: %d\n", ierr);
    }
    int rank;
    ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (ierr != MPI_SUCCESS) {
        fprintf(stderr, "Error: MPI_init() failed: %d\n", ierr);
    }
    int num_ranks;
    ierr = MPI_Comm_size(MPI_COMM_WORLD, &num_ranks);
    if (ierr != MPI_SUCCESS) {
        fprintf(stderr, "Error: MPI_init() failed: %d\n", ierr);
    }
    fprintf(stderr, "max number of threads %d\n", omp_get_max_threads());
    if (delay > 0) {
        /* give a debugger time to attach before the offload regions run */
        fprintf(stderr, "sleeping for %d seconds\n", delay);
        sleep(delay);
        fprintf(stderr, "continuing\n");
    }
#endif
    int runningOnGPU = 0;
    int checkVal=-1;            /* scaled value of m_gate[0]; -1 until the kernel runs */
    const int nCells=1000000;
    /* state arrays, zero-initialized on the host then mapped to the device */
    double* m_gate = (double*)calloc(nCells,sizeof(double));
    double* Vm = (double*)calloc(nCells,sizeof(double));
    /* Mhu_a: first 5 entries are numerator, next 10 denominator of a
     * rational polynomial fit; Tau_a: 18-term polynomial pre-scaled by 0.02 */
    const double Mhu_a[] = { 9.9632117206253790e-01, 4.0825738726469545e-02, 6.3401613233199589e-04, 4.4158436861700431e-06, 1.1622058324043520e-08, 1.0000000000000000e+00, 4.0568375699663400e-02, 6.4216825832642788e-04, 4.2661664422410096e-06, 1.3559930396321903e-08, -1.3573468728873069e-11, -4.2594802366702580e-13, 7.6779952208246166e-15, 1.4260675804433780e-16, -2.6656212072499249e-18};
    const double Tau_a[] = {1.7765862602413648e+01*0.02, 5.0010202770602419e-02*0.02, -7.8002064070783474e-04*0.02, -6.9399661775931530e-05*0.02, 1.6936588308244311e-06*0.02, 5.4629017090963798e-07*0.02, -1.3805420990037933e-08*0.02, -8.0678945216155694e-10*0.02, 1.6209833004622630e-11*0.02, 6.5130101230170358e-13*0.02, -6.9931705949674988e-15*0.02, -3.1161210504114690e-16*0.02, 5.0166191902609083e-19*0.02, 7.8608831661430381e-20*0.02, 4.3936315597226053e-22*0.02, -7.0535966258003289e-24*0.02, -9.0473475495087118e-26*0.02, -2.9878427692323621e-28*0.02, 1.0000000000000000e+00};
    /* keep both arrays resident on the device across all iterations */
    #pragma omp target enter data map(to: m_gate[:nCells])
    #pragma omp target enter data map(to: Vm[:nCells])
    for (int itime=0; itime<iters; itime++) {
        #pragma omp target teams distribute parallel for thread_limit(128) map(from:checkVal)
        for (int ii=0; ii<nCells; ii++) {
            double sum1,sum2;
            const double x = Vm[ii];
            const int Mhu_l = 10;
            const int Mhu_m = 5;
            /* Horner evaluation of the numerator polynomial (Mhu_a[0..4]) */
            sum1 = 0;
            for (int j = Mhu_m-1; j >= 0; j--)
                sum1 = Mhu_a[j] + x*sum1;
            /* Horner evaluation of the denominator polynomial (Mhu_a[5..14]) */
            sum2 = 0;
            int k = Mhu_m + Mhu_l - 1;
            for (int j = k; j >= Mhu_m; j--)
                sum2 = Mhu_a[j] + x * sum2;
            double mhu = sum1/sum2;
            const int Tau_m = 18;
            /* Horner evaluation of the 18-term Tau polynomial */
            sum1 = 0;
            for (int j = Tau_m-1; j >= 0; j--)
                sum1 = Tau_a[j] + x*sum1;
            double tauR = sum1;
            /* relax the gate value toward mhu at rate tauR */
            m_gate[ii] += (mhu - m_gate[ii])*(1-exp(-tauR));
            if (ii == 0)
                checkVal=(int) (1000000.0 * m_gate[ii]);
        }
    }
    /* Test if GPU is available using OpenMP4.5 */
    #pragma omp target map(from:runningOnGPU)
    {
        if (omp_is_initial_device() == 0)
            runningOnGPU = 1;
    }
    /* If still running on CPU, GPU must not be available */
    /* the checksum is only checked for the default iteration count (1000) */
    if (runningOnGPU && (iters != 1000 || checkVal == 996321) ) {
        printf("PASS\n");
    } else if (!runningOnGPU) {
        printf("FAIL - not running on a GPU\n");
        return 1;
    } else {
        printf("FAIL iters=%d checkDelta=%d\n", iters, checkVal-996321);
        return 1;
    }
#ifdef USE_MPI
    ierr = MPI_Finalize();
    if (ierr != MPI_SUCCESS) {
        fprintf(stderr, "Error: MPI_Finalize() failed: %d\n", ierr);
        return 1;
    }
#endif
    return 0;
}
|
omp4_demo2.c | /*
Copyright since 2016 the OMPi Team
Dept. of Computer Science & Engineering, University of Ioannina
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* omp4_demo2.c
* ------------
* Another demonstration of OpenMP4.0 constructs on the Zynq and the Epiphany.
*/
#include <omp.h>
#include <stdio.h>
/*
 * Runs 20 `target` kernels inside a single `target data` region to show
 * where each kernel executes and how the device copy of `arg` evolves.
 * Kernels whose index is a multiple of 10 have a false `if` clause and
 * therefore run on the host (initial device); the others may be
 * offloaded.  `arg` stays mapped on the device for the whole region and
 * is pulled back with `target update from` before each printout.
 */
void demo_devices()
{
int arg = 0;
/* Diagnostics */
printf("Available devices: %d\n", omp_get_num_devices());
printf("Default device: %d\n", omp_get_default_device());
/* arg is mapped to the device with initial value 0 */
#pragma omp target data map(to:arg)
{
int on_eCore, i;
for (i = 0; i < 20; i++)
{
/* Kernels 0 & 10 will execute on the host */
#pragma omp target map(from:on_eCore) if(i%10 != 0)
{
/* omp_is_initial_device returns TRUE only on Zynq */
on_eCore = !omp_is_initial_device();
arg++;
}
/* Get value from the device */
#pragma omp target update from(arg)
printf("Kernel %2d executed on %8s --- value of arg=%d\n", i,
on_eCore ? "Epiphany" : "Zynq", arg);
/* When the 10th kernel was executed on the host */
/* NOTE(review): on a host-only runtime every kernel reports
 * on_eCore == 0, so this branch fires for every i > 0, not just
 * i == 10 -- confirm that is the intended behaviour off-target. */
if (!on_eCore && i > 0)
{
/* Set arg to 100 and push to the device */
printf(" --> Changing arg value to 100\n");
arg = 100;
#pragma omp target update to(arg)
}
}
}
}
/* Entry point: run the device demonstration and exit cleanly. */
int main(void)
{
    demo_devices();
    return 0;
}
|
simulation.c | #include "utils.h"
#include "plate.h"
#include "logging.h"
#include "simulation.h"
#include "paralellUtils.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>
#include <sys/time.h>
/* Return the current wall-clock time in microseconds since the epoch. */
long measureTime(){
    struct timeval now;

    gettimeofday(&now, NULL);
    return (now.tv_sec * 1000000) + now.tv_usec;
}
/*
 * Runs the heat-plate simulation sequentially.
 * numIterations: number of update sweeps to perform.
 * numRows/numCols: plate dimensions.
 * timeResult: receives per-iteration and total wall-clock timings
 *             (microseconds) plus the thread count used (always 1).
 * Two plates are used in ping-pong fashion: each sweep reads the
 * previous state and writes the next, then the pointers are swapped.
 */
void serialSimulation(int numIterations, int numRows, int numCols, TimeResultRef timeResult){
    int step, row, col;
    Value updated;
    PlateRef source = NULL, target = NULL, tmp = NULL;
    doLog(DEBUG, "starting serial simulation");
    /* Define the plate holes (cells pinned to HOLE_VALUE). */
    Hole holes [1];
    initializeHole(&(holes[0]),3,6,3,6);
    /* Create and initialize the two ping-pong plates. */
    doLog(DEBUG, "creating plates structures");
    source = createPlate(numRows, numCols);
    target = createPlate(numRows, numCols);
    doLog(DEBUG, "initlializing plate structure");
    initializePlate(source, 1, holes);
    if(LOG_LEVEL == DEBUG){
        printPlate(source);
    }
    /* Start the total-time clock. */
    timeResult->totaltime = measureTime();
    for(step = 0; step < numIterations; step++){
        doLog(INFO, "new iteration");
        /* Start the per-iteration clock. */
        timeResult->times[step] = measureTime();
        /* One full update sweep: holes stay fixed, every other cell
         * gets its new temperature from the previous state. */
        for(row = 0; row < target->rows; row++){
            for(col = 0; col < target->cols; col++){
                if(isHole(source, row, col)){
                    setPlateValue(target, row, col, HOLE_VALUE);
                    continue;
                }
                updated = calculateNewTemperature(source, row, col);
                setPlateValue(target, row, col, updated);
            }
        }
        /* Swap the plates so the freshly written one becomes the source. */
        tmp = source;
        source = target;
        target = tmp;
        if(LOG_LEVEL == DEBUG){
            printPlate(source);
        }
        /* Stop the per-iteration clock. */
        timeResult->times[step] = measureTime() - timeResult->times[step];
    }
    /* Stop the total-time clock and record the (serial) thread count. */
    timeResult->numThreads = 1;
    timeResult->totaltime = measureTime() - timeResult->totaltime;
    doLog(DEBUG, "ending serial simulation");
}
/*
 * Applies one update sweep over the sub-matrix described by `subtask`
 * (inclusive row/col bounds), reading temperatures from oldPlate and
 * writing results to currentPlate.  Hole cells keep HOLE_VALUE.
 */
void loopBehaviour(PlateRef oldPlate, PlateRef currentPlate, MatrixSubTask subtask){
    int row, col;
    for(row = subtask.fromRow; row <= subtask.toRow; row++){
        for(col = subtask.fromCol; col <= subtask.toCol; col++){
            if(isHole(oldPlate, row, col)){
                setPlateValue(currentPlate, row, col, HOLE_VALUE);
            } else {
                setPlateValue(currentPlate, row, col, calculateNewTemperature(oldPlate, row, col));
            }
        }
    }
}
/*
 * Runs the heat-plate simulation with numThreads OpenMP threads.
 * numIterations: number of update sweeps; numRows/numCols: plate size;
 * division: strategy used by buildMatrixDivisions to split the matrix;
 * timeResult: receives per-iteration/total timings and the thread count.
 *
 * Fixes over the previous version:
 *  - `iteration`/`i` were shared across the parallel region (data race on
 *    the loop counters); they are now declared inside the region so each
 *    thread has a private copy.
 *  - The plate-pointer swap, logging and timing were executed by EVERY
 *    thread concurrently (with an even thread count the swaps cancelled
 *    out).  They are now inside `single` constructs whose implicit
 *    barriers also keep the next sweep from reading the pointers mid-swap.
 */
void paralellSimulation(int numIterations, int numRows, int numCols, int numThreads, DivisionStructure division, TimeResultRef timeResult){
    MatrixSubTaskRef subtasks = NULL;
    PlateRef oldPlate = NULL, currentPlate = NULL, switchPlate = NULL;
    doLog(DEBUG, "starting paralell simulation");
    // define holes
    Hole holes [1];
    initializeHole(&(holes[0]),3,6,3,6);
    // create and initialize plates
    doLog(DEBUG, "creating plates structures");
    oldPlate = createPlate(numRows, numCols);
    currentPlate = createPlate(numRows, numCols);
    doLog(DEBUG, "initlializing plate structure");
    initializePlate(oldPlate, 1, holes);
    if(LOG_LEVEL == DEBUG){
        printPlate(oldPlate);
    }
    // define matrix divisions (one subtask per thread)
    subtasks = buildMatrixDivisions(numRows, numCols, numThreads, division);
    // measure total time
    timeResult->totaltime = measureTime();
    #pragma omp parallel num_threads(numThreads)
    {
        /* Private loop counters; the plate pointers stay shared and are
         * only mutated inside `single` regions. */
        int iteration, i;
        for(iteration=0; iteration<numIterations; iteration++){
            /* One thread logs and starts the per-iteration timer; the
             * implicit barrier releases the team together. */
            #pragma omp single
            {
                doLog(INFO, "new iteration");
                timeResult->times[iteration] = measureTime();
            }
            // update plate values (implicit barrier at loop end)
            #pragma omp for
            for(i=0; i < numThreads; i++){
                loopBehaviour(oldPlate, currentPlate, subtasks[i]);
            }
            /* Exactly one thread swaps the plates and stops the timer;
             * the implicit barrier prevents the next sweep from reading
             * the pointers before the swap completes. */
            #pragma omp single
            {
                doLog(DEBUG, "switching plates");
                switchPlate = oldPlate;
                oldPlate = currentPlate;
                currentPlate = switchPlate;
                doLog(INFO, "finished iteration");
                if(LOG_LEVEL == DEBUG){
                    printPlate(oldPlate);
                }
                timeResult->times[iteration] = measureTime() - timeResult->times[iteration];
            }
        }
    }
    // measure total time
    timeResult->numThreads = numThreads;
    timeResult->totaltime = measureTime() - timeResult->totaltime;
    doLog(DEBUG, "ending paralell simulation");
}
|
residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/checks.h"
#include "solving_strategies/schemes/residual_based_implicit_time_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
* @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is a two step \f$ n \f$ order accurate method.
* This scheme is designed to solve a system of the type:
*\f[
* \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(un0)}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
*
* If we call:
*
* - Second derivative:
* -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i
* - First derivative:
* -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i
 * - Variable:
 *    -# \f$ u_{ni} \f$ the variable at the step i
*
* Then we assume:
* \f[ \frac{d^2(u_{n0})}{dt^2} \|t_{n0} = \sum_i c_i \dot{u}_{ni} \f]
 * \f[ \frac{d(u_{n0})}{dt} \|t_{n0} = \sum_i c_i u_{ni} \f]
* with for order 2 (BDF2):
* -# \f$ c_0 = \frac{1.5}{dt} \f$
* -# \f$ c_1 = \frac{-2.0}{dt} \f$
* -# \f$ c_2 = \frac{0.5}{dt} \f$
*
* The LHS and RHS can be defined as:
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f]
* and
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + K \f]
* @note This implies that elements are expected to be written in terms
* of a variable with two time derivatives
* <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a>
* @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFScheme
    : public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme );

    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;

    typedef typename BaseType::Pointer BaseTypePointer;

    typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;

    typedef typename ImplicitBaseType::TDataType TDataType;

    typedef typename ImplicitBaseType::DofsArrayType DofsArrayType;

    typedef typename Element::DofsVectorType DofsVectorType;

    typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType;

    typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef ModelPart::NodesContainerType NodesArrayType;

    /// Definition of epsilon
    static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Constructor. The BDF method
     * @param Order The integration order
     * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives
     */
    explicit ResidualBasedBDFScheme(const std::size_t Order = 2)
        :ImplicitBaseType(),
         mOrder(Order)
    {
        // Allocate auxiliary memory (one scratch vector per OpenMP thread)
        const std::size_t num_threads = OpenMPUtils::GetNumThreads();
        mVector.dotun0.resize(num_threads);
        mVector.dot2un0.resize(num_threads);

        // Doing a minimal check
        KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl;

        // We resize the BDF coefficients
        if (mBDF.size() != (mOrder + 1))
            mBDF.resize(mOrder + 1);
    }

    /** Copy Constructor.
     */
    explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther)
        :ImplicitBaseType(rOther)
        ,mOrder(rOther.mOrder)
        ,mBDF(rOther.mBDF)
        ,mVector(rOther.mVector)
    {
    }

    /**
     * Clone
     */
    BaseTypePointer Clone() override
    {
        return BaseTypePointer( new ResidualBasedBDFScheme(*this) );
    }

    /** Destructor.
     */
    ~ResidualBasedBDFScheme() override {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Performing the update of the solution
     * @details Incremental update within newton iteration. It updates the state variables at the end of the time step
     * \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f]
     * @param rModelPart The model of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx incremental update of primary variables
     * @param rb RHS Vector
     */
    void Update(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        // Update of displacement (by DOF)
        mpDofUpdater->UpdateDofs(rDofSet, rDx);

        // Update the first and second time derivatives (delegated to the derived scheme)
        UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);

        KRATOS_CATCH( "" );
    }

    /**
     * @brief Performing the prediction of the solution
     * @details It predicts the solution for the current step x = xold + vold * Dt
     * @param rModelPart The model of the problem to solve
     * @param rDofSet set of all primary variables
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        // This base class cannot know which variables carry the derivatives; derived schemes must override
        KRATOS_ERROR << "Calling base BDF class" << std::endl;

        KRATOS_CATCH( "" );
    }

    /**
     * @brief It initializes time step solution. Only for reasons if the time step solution is restarted
     * @param rModelPart The model of the problem to solve
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     * @todo I cannot find the formula for the higher orders with variable time step. I tried to deduce by myself but the result was very unstable
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        ProcessInfo& r_previous_process_info = r_current_process_info.GetPreviousTimeStepInfo(1);

        ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        const double delta_time = r_current_process_info[DELTA_TIME];
        double previous_delta_time = r_previous_process_info[DELTA_TIME];

        KRATOS_ERROR_IF(delta_time < ZeroTolerance) << "Detected delta_time equal to zero or negative in the Solution Scheme DELTA_TIME: " << delta_time << ". PLEASE : check if the time step is created correctly for the current time step" << std::endl;
        KRATOS_WARNING_IF("ResidualBasedBDFScheme", previous_delta_time < ZeroTolerance) << "Detected previous_delta_time equal to zero or negative in the Solution Scheme DELTA_TIME: " << previous_delta_time << ". PLEASE : check if the time step is created correctly for the previous time step" << std::endl;

        // Fall back to the current step size when no valid previous one exists (e.g. first step)
        previous_delta_time = std::abs(previous_delta_time) > ZeroTolerance ? previous_delta_time : delta_time;

        // Calculate the BDF coefficients (rho is the step-size ratio)
        const double rho = previous_delta_time / delta_time;
        double time_coeff = 0.0;
        for (std::size_t i_rho = 0; i_rho < mOrder; ++i_rho)
            time_coeff += delta_time * std::pow(rho, i_rho);
        time_coeff = 1.0/time_coeff;

        // We compute the BDF coefficients (orders > 2 assume constant Dt)
        switch(mOrder) {
            case 1 : mBDF[0] = time_coeff * rho; //coefficient for step n+1 (1Dt if Dt is constant)
                     mBDF[1] = -time_coeff * rho; //coefficient for step n (-1Dt if Dt is constant)
                break;
            case 2 : mBDF[0] = time_coeff * (std::pow(rho, 2) + 2.0 * rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
                     mBDF[1] = -time_coeff * (std::pow(rho, 2) + 2.0 * rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
                     mBDF[2] = time_coeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
                break;
            case 3 : mBDF[0] = 11.0/(6.0 * delta_time); //coefficient for step n+1 (11/6Dt if Dt is constant)
                     mBDF[1] = -18.0/(6.0 * delta_time); //coefficient for step n (-18/6Dt if Dt is constant)
                     mBDF[2] = 9.0/(6.0 * delta_time); //coefficient for step n-1 (9/6Dt if Dt is constant)
                     mBDF[3] = -2.0/(6.0 * delta_time); //coefficient for step n-2 (2/6Dt if Dt is constant)
                break;
            case 4 : mBDF[0] = 25.0/(12.0 * delta_time); //coefficient for step n+1 (25/12Dt if Dt is constant)
                     mBDF[1] = -48.0/(12.0 * delta_time); //coefficient for step n (-48/12Dt if Dt is constant)
                     mBDF[2] = 36.0/(12.0 * delta_time); //coefficient for step n-1 (36/12Dt if Dt is constant)
                     mBDF[3] = -16.0/(12.0 * delta_time); //coefficient for step n-2 (16/12Dt if Dt is constant)
                     mBDF[4] = 3.0/(12.0 * delta_time); //coefficient for step n-3 (3/12Dt if Dt is constant)
                break;
            case 5 : mBDF[0] = 137.0/(60.0 * delta_time); //coefficient for step n+1 (137/60Dt if Dt is constant)
                     mBDF[1] = -300.0/(60.0 * delta_time); //coefficient for step n (-300/60Dt if Dt is constant)
                     mBDF[2] = 300.0/(60.0 * delta_time); //coefficient for step n-1 (300/60Dt if Dt is constant)
                     mBDF[3] = -200.0/(60.0 * delta_time); //coefficient for step n-2 (-200/60Dt if Dt is constant)
                     mBDF[4] = 75.0/(60.0 * delta_time); //coefficient for step n-3 (75/60Dt if Dt is constant)
                     mBDF[5] = -12.0/(60.0 * delta_time); //coefficient for step n-4 (-12/60Dt if Dt is constant)
                break;
            case 6 : mBDF[0] = 147.0/(60.0 * delta_time); //coefficient for step n+1 (147/60Dt if Dt is constant)
                     mBDF[1] = -360.0/(60.0 * delta_time); //coefficient for step n (-360/60Dt if Dt is constant)
                     mBDF[2] = 450.0/(60.0 * delta_time); //coefficient for step n-1 (450/60Dt if Dt is constant)
                     mBDF[3] = -400.0/(60.0 * delta_time); //coefficient for step n-2 (-400/60Dt if Dt is constant)
                     mBDF[4] = 225.0/(60.0 * delta_time); //coefficient for step n-3 (225/60Dt if Dt is constant)
                     mBDF[5] = -72.0/(60.0 * delta_time); //coefficient for step n-4 (-72/60Dt if Dt is constant)
                     mBDF[6] = 10.0/(60.0 * delta_time); //coefficient for step n-5 (10/60Dt if Dt is constant)
                break;
            default : KRATOS_ERROR << "Methods with order > 6 are not zero-stable so they cannot be used" << std::endl;
        }

        const double tolerance = 1.0e-24;
        KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2 && std::abs(delta_time - previous_delta_time) > tolerance) << "For higher orders than 2 the time step is assumed to be constant. Sorry for the inconveniences" << std::endl;

        // Adding to the process info
        Vector bdf_vector(mOrder + 1);
        for (std::size_t i_order = 0; i_order < mOrder + 1; ++i_order)
            bdf_vector[i_order] = mBDF[i_order];
        r_current_process_info.SetValue(BDF_COEFFICIENTS, bdf_vector);

        KRATOS_CATCH( "" );
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed on the input provided.
     * @details Checks can be "expensive" as the function is designed to catch user's errors.
     * @param rModelPart The model of the problem to solve
     * @return Zero means all ok
     */
    int Check(ModelPart& rModelPart) override
    {
        KRATOS_TRY;

        const int err = ImplicitBaseType::Check(rModelPart);
        if(err!=0) return err;

        // Check for minimum value of the buffer index
        // Verify buffer size (a BDF of order p needs p+1 stored steps)
        KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be greater than " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl;

        KRATOS_CATCH( "" );
        return 0;
    }

    /// Free memory allocated by this class.
    void Clear() override
    {
        this->mpDofUpdater->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedBDFScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    struct GeneralVectors
    {
        std::vector< Vector > dotun0; /// First derivative
        std::vector< Vector > dot2un0; /// Second derivative
    };

    const std::size_t mOrder; /// The integration order
    Vector mBDF; /// The BDF coefficients
    GeneralVectors mVector; /// The structure containing the derivatives

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Performing the update of the derivatives
     * @param rModelPart The model of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx incremental update of primary variables
     * @param rb RHS Vector
     */
    inline void UpdateDerivatives(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        )
    {
        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );

        // Getting first node iterator
        const auto it_node_begin = rModelPart.Nodes().begin();

        #pragma omp parallel for
        for(int i = 0; i< num_nodes; ++i) {
            auto it_node = it_node_begin + i;
            UpdateFirstDerivative(it_node);
            UpdateSecondDerivative(it_node);
        }
    }

    /**
     * @brief Updating first time derivative (velocity)
     * @param itNode the node interator
     */
    virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode)
    {
        KRATOS_ERROR << "Calling base BDF class" << std::endl;
    }

    /**
     * @brief Updating second time derivative (acceleration)
     * @param itNode the node interator
     */
    virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode)
    {
        KRATOS_ERROR << "Calling base BDF class" << std::endl;
    }

    /**
     * @brief It adds the dynamic LHS contribution of the elements
     * \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
     * @param rLHS_Contribution The dynamic contribution for the LHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToLHS(
        LocalSystemMatrixType& rLHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        // Adding mass contribution to the dynamic stiffness
        if (rM.size1() != 0) { // if M matrix declared
            noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2);
        }

        // Adding damping contribution
        if (rD.size1() != 0) { // if D matrix declared
            noalias(rLHS_Contribution) += rD * mBDF[0];
        }
    }

    /**
     * @brief It adds the dynamic RHS contribution of the objects
     * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
     * @param rObject The object to compute
     * @param rRHS_Contribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    template <typename TObjectType>
    void TemplateAddDynamicsToRHS(
        TObjectType rObject,
        LocalSystemVectorType& rRHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        ProcessInfo& rCurrentProcessInfo
        )
    {
        // Per-thread scratch vectors avoid allocations inside the assembly loop
        const std::size_t this_thread = OpenMPUtils::ThisThread();

        // Adding inertia contribution
        if (rM.size1() != 0) {
            rObject->GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0);
            noalias(rRHS_Contribution) -= prod(rM, mVector.dot2un0[this_thread]);
        }

        // Adding damping contribution
        if (rD.size1() != 0) {
            rObject->GetFirstDerivativesVector(mVector.dotun0[this_thread], 0);
            noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]);
        }
    }

    /**
     * @brief It adds the dynamic RHS contribution of the elements
     * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
     * @param pElement The element to compute
     * @param rRHS_Contribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Element::Pointer pElement,
        LocalSystemVectorType& rRHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        TemplateAddDynamicsToRHS<Element::Pointer>(pElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
    }

    /**
     * @brief It adds the dynamic RHS contribution of the condition
     * \f[ RHS = f_{ext} - \ddot{u}_{n0} \mathbf{M} + \dot{u}_{n0} \mathbf{D} + u_{n0} \mathbf{K} \f]
     * @param pCondition The condition to compute
     * @param rRHS_Contribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Condition::Pointer pCondition,
        LocalSystemVectorType& rRHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        TemplateAddDynamicsToRHS<Condition::Pointer>(pCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    /// Utility class to perform the update after solving the system, will be different in MPI runs.
    typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Serialization
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class ResidualBasedBDFScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
|
kmp_task_depend_all.c | // RUN: %libomp-compile-and-run
// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc
// Tests OMP 5.x task dependence "omp_all_memory",
// emulates compiler codegen versions for new dep kind
//
// Task tree created:
// task0 - task1 (in: i1, i2)
// \
// task2 (inoutset: i2), (in: i1)
// /
// task3 (omp_all_memory) via flag=0x80
// /
// task4 - task5 (in: i1, i2)
// /
// task6 (omp_all_memory) via addr=-1
// /
// task7 (omp_all_memory) via flag=0x80
// /
// task8 (in: i3)
//
#include <stdio.h>
#include <omp.h>
#ifdef _WIN32
#include <windows.h>
#define mysleep(n) Sleep(n)
#else
#include <unistd.h>
#define mysleep(n) usleep((n)*1000)
#endif
// to check the # of concurrent tasks (must be 1 for MTX, <3 for other kinds)
static int checker = 0;
static int err = 0;
#ifndef DELAY
#define DELAY 100
#endif
// ---------------------------------------------------------------------------
// internal data to emulate compiler codegen
typedef struct DEP {
size_t addr;
size_t len;
unsigned char flags;
} dep;
#define DEP_ALL_MEM 0x80
typedef struct task {
void** shareds;
void* entry;
int part_id;
void* destr_thunk;
int priority;
long long device_id;
int f_priv;
} task_t;
#define TIED 1
typedef int(*entry_t)(int, task_t*);
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
// thunk routine for tasks with ALL dependency
// Entry for tasks carrying an omp_all_memory dependence: such tasks must be
// fully serialized, so the concurrency counter must read exactly 1 for the
// whole duration of the task.
int thunk_m(int gtid, task_t* ptask) {
  int snapshot, tid;
  #pragma omp atomic capture
  snapshot = ++checker;
  tid = omp_get_thread_num();
  printf("task m_%d, th %d, checker %d\n", ptask->f_priv, tid, snapshot);
  if (snapshot != 1) { // no more than 1 task at a time
    err++;
    printf("Error m1, checker %d != 1\n", snapshot);
  }
  mysleep(DELAY);
  #pragma omp atomic read
  snapshot = checker; // must still be equal to 1
  if (snapshot != 1) {
    err++;
    printf("Error m2, checker %d != 1\n", snapshot);
  }
  #pragma omp atomic
  --checker;
  return 0;
}
// thunk routine for tasks with inoutset dependency
// Entry for task2 (inoutset dependence on i2): the surrounding dependence
// graph serializes it, so the concurrency counter must read exactly 1.
int thunk_s(int gtid, task_t* ptask) {
  int observed, tid;
  #pragma omp atomic capture
  observed = ++checker; // 1
  tid = omp_get_thread_num();
  printf("task 2_%d, th %d, checker %d\n", ptask->f_priv, tid, observed);
  if (observed != 1) { // no more than 1 task at a time
    err++;
    printf("Error s1, checker %d != 1\n", observed);
  }
  mysleep(DELAY);
  #pragma omp atomic read
  observed = checker; // must still be equal to 1
  if (observed != 1) {
    err++;
    printf("Error s2, checker %d != 1\n", observed);
  }
  #pragma omp atomic
  --checker;
  return 0;
}
#ifdef __cplusplus
extern "C" {
#endif
int __kmpc_global_thread_num(id*);
task_t *__kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, entry_t rtn);
int __kmpc_omp_task_with_deps(id *loc, int gtid, task_t *task, int ndeps,
dep *dep_lst, int nd_noalias, dep *noalias_lst);
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
#ifdef __cplusplus
} // extern "C"
#endif
// End of internal data
// ---------------------------------------------------------------------------
/*
 * Driver: builds the task tree sketched at the top of the file.  Plain
 * `#pragma omp task` directives create tasks with regular `in` dependences,
 * while the hand-written "compiler codegen" sections call
 * __kmpc_omp_task_alloc/__kmpc_omp_task_with_deps directly to emulate the
 * two ways a compiler can encode omp_all_memory: the DEP_ALL_MEM (0x80)
 * flag, or a dependence address of (size_t)-1.  Global `checker` counts the
 * tasks running concurrently; `err` accumulates any observed violation.
 */
int main()
{
int i1,i2,i3;
omp_set_num_threads(8);
omp_set_dynamic(0);
#pragma omp parallel
{
#pragma omp single nowait
{
dep sdep[2];
task_t *ptr;
int gtid = __kmpc_global_thread_num(&loc);
int t = omp_get_thread_num();
/* Tasks 0 and 1 share `in` dependences, so they may run pairwise
 * concurrently: checker must stay in [1,2] while either runs. */
#pragma omp task depend(in: i1, i2)
{ // task 0
int lcheck, th;
#pragma omp atomic capture
lcheck = ++checker; // 1 or 2
th = omp_get_thread_num();
printf("task 0_%d, th %d, checker %d\n", t, th, lcheck);
if (lcheck > 2 || lcheck < 1) {
err++; // no more than 2 tasks concurrently
printf("Error1, checker %d, not 1 or 2\n", lcheck);
}
mysleep(DELAY);
#pragma omp atomic read
lcheck = checker; // 1 or 2
if (lcheck > 2 || lcheck < 1) {
#pragma omp atomic
err++;
printf("Error2, checker %d, not 1 or 2\n", lcheck);
}
#pragma omp atomic
--checker;
}
#pragma omp task depend(in: i1, i2)
{ // task 1
int lcheck, th;
#pragma omp atomic capture
lcheck = ++checker; // 1 or 2
th = omp_get_thread_num();
printf("task 1_%d, th %d, checker %d\n", t, th, lcheck);
if (lcheck > 2 || lcheck < 1) {
err++; // no more than 2 tasks concurrently
printf("Error3, checker %d, not 1 or 2\n", lcheck);
}
mysleep(DELAY);
#pragma omp atomic read
lcheck = checker; // 1 or 2
if (lcheck > 2 || lcheck < 1) {
err++;
printf("Error4, checker %d, not 1 or 2\n", lcheck);
}
#pragma omp atomic
--checker;
}
// compiler codegen start
// task2: inoutset on i2 serializes it against tasks 0/1 (thunk_s checks)
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_s);
sdep[0].addr = (size_t)&i1;
sdep[0].len = 0; // not used
sdep[0].flags = 1; // IN
sdep[1].addr = (size_t)&i2;
sdep[1].len = 0; // not used
sdep[1].flags = 8; // INOUTSET
ptr->f_priv = t + 10; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// task3: omp_all_memory encoded via the DEP_ALL_MEM flag; the plain IN
// dependence on i1 must be ignored by the runtime
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_m);
sdep[0].addr = (size_t)&i1; // to be ignored
sdep[0].len = 0; // not used
sdep[0].flags = 1; // IN
sdep[1].addr = 0;
sdep[1].len = 0; // not used
sdep[1].flags = DEP_ALL_MEM; // omp_all_memory
ptr->f_priv = t + 20; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// compiler codegen end
#pragma omp task depend(in: i1, i2)
{ // task 4
int lcheck, th;
#pragma omp atomic capture
lcheck = ++checker; // 1 or 2
th = omp_get_thread_num();
printf("task 4_%d, th %d, checker %d\n", t, th, lcheck);
if (lcheck > 2 || lcheck < 1) {
err++; // no more than 2 tasks concurrently
printf("Error5, checker %d, not 1 or 2\n", lcheck);
}
mysleep(DELAY);
#pragma omp atomic read
lcheck = checker; // 1 or 2
if (lcheck > 2 || lcheck < 1) {
err++;
printf("Error6, checker %d, not 1 or 2\n", lcheck);
}
#pragma omp atomic
--checker;
}
#pragma omp task depend(in: i1, i2)
{ // task 5
int lcheck, th;
#pragma omp atomic capture
lcheck = ++checker; // 1 or 2
th = omp_get_thread_num();
printf("task 5_%d, th %d, checker %d\n", t, th, lcheck);
if (lcheck > 2 || lcheck < 1) {
err++; // no more than 2 tasks concurrently
printf("Error7, checker %d, not 1 or 2\n", lcheck);
}
mysleep(DELAY);
#pragma omp atomic read
lcheck = checker; // 1 or 2
if (lcheck > 2 || lcheck < 1) {
err++;
printf("Error8, checker %d, not 1 or 2\n", lcheck);
}
#pragma omp atomic
--checker;
}
// compiler codegen start
// task6: omp_all_memory encoded via the legacy address form (size_t)-1
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_m);
sdep[0].addr = (size_t)(-1); // omp_all_memory
sdep[0].len = 0; // not used
sdep[0].flags = 2; // OUT
ptr->f_priv = t + 30; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 1, sdep, 0, 0);
// task7: DEP_ALL_MEM flag again; the mutexinoutset entry on i3 must be
// ignored in favor of the all-memory dependence
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_m);
sdep[0].addr = 0;
sdep[0].len = 0; // not used
sdep[0].flags = DEP_ALL_MEM; // omp_all_memory
sdep[1].addr = (size_t)&i3; // to be ignored
sdep[1].len = 0; // not used
sdep[1].flags = 4; // MUTEXINOUTSET
ptr->f_priv = t + 40; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// compiler codegen end
/* Task 8 depends only on i3; it must still be ordered after task7's
 * all-memory dependence, hence checker must read exactly 1. */
#pragma omp task depend(in: i3)
{ // task 8
int lcheck, th;
#pragma omp atomic capture
lcheck = ++checker; // 1
th = omp_get_thread_num();
printf("task 8_%d, th %d, checker %d\n", t, th, lcheck);
if (lcheck != 1) {
err++;
printf("Error9, checker %d, != 1\n", lcheck);
}
mysleep(DELAY);
#pragma omp atomic read
lcheck = checker;
if (lcheck != 1) {
err++;
printf("Error10, checker %d, != 1\n", lcheck);
}
#pragma omp atomic
--checker;
}
} // single
} // parallel
/* checker must return to 0 once every task has finished. */
if (err == 0 && checker == 0) {
printf("passed\n");
return 0;
} else {
printf("failed, err = %d, checker = %d\n", err, checker);
return 1;
}
}
|
fiducial_stereo.c | /*
* Copyright 2012, by the California Institute of Technology. ALL
* RIGHTS RESERVED. United States Government Sponsorship
* acknowledged. Any commercial use must be negotiated with the Office
* of Technology Transfer at the California Institute of Technology.
*
* This software may be subject to U.S. export control laws. By
* accepting this software, the user agrees to comply with all
* applicable U.S. export laws and regulations. User has the
* responsibility to obtain export licenses, or other export authority
* as may be required before exporting such information to foreign
* countries or providing access to foreign persons.
*/
/**
@file fiducial_stereo.c
@brief stereo msl fiducial detectors
@date 11/27/2012
@author Paul Hebert (paul.hebert@jpl.nasa.gov)
Mobility and Manipulation Group (3475), JPL
*/
#include <sys/time.h>
#include "fiducial_stereo.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Allocate a zeroed stereo fiducial detector, including one per-camera
 * detector for each of the FIDUCIAL_STEREO_NUM_CAMERAS slots.
 * Aborts (assert) on allocation failure; caller owns the result and must
 * release it with fiducial_stereo_free().
 */
fiducial_stereo_t* fiducial_stereo_alloc()
{
    fiducial_stereo_t* self = (fiducial_stereo_t*) calloc(1, sizeof(fiducial_stereo_t));
    int cam;
    assert(self);
    for(cam = 0; cam < FIDUCIAL_STEREO_NUM_CAMERAS; cam++)
    {
        self->fiducial_detector[cam] = fiducial_detector_alloc();
        assert(self->fiducial_detector[cam]);
    }
    return self;
}
/*
 * Release a stereo detector created by fiducial_stereo_alloc().
 * Frees each per-camera detector, then the container itself.
 * Safe to call with NULL (no-op).
 */
void fiducial_stereo_free(fiducial_stereo_t* self)
{
    int i;
    /* Guard before touching members: previously `self` was dereferenced
     * in the loop below before the NULL check, crashing on NULL input. */
    if(!self)
        return;
    for(i = 0; i < FIDUCIAL_STEREO_NUM_CAMERAS; i++)
    {
        if(self->fiducial_detector[i])
            fiducial_detector_free(self->fiducial_detector[i]);
    }
    free(self);
    return;
}
/*
 * Reset both per-camera detectors and set the initial and current
 * fiducial poses to the identity transform.
 */
void fiducial_stereo_init(fiducial_stereo_t* self)
{
    int cam;
    for(cam = 0; cam < FIDUCIAL_STEREO_NUM_CAMERAS; cam++)
    {
        fiducial_detector_init(self->fiducial_detector[cam]);
    }
    self->initial_fiducial_pose = fiducial_pose_ident();
    self->fiducial_pose = fiducial_pose_ident();
    return;
}
/* Install the left/right camera models on the corresponding detectors.
 * Short-circuits: if the left install fails, the right one is not attempted.
 * Returns FIDUCIAL_DETECTOR_OK only when both installs succeed. */
fiducial_detector_error_t fiducial_stereo_set_camera_models(fiducial_stereo_t* self, fiducial_stereo_cam_model_t* left_cam, fiducial_stereo_cam_model_t* right_cam )
{
    if (fiducial_detector_set_camera_models(self->fiducial_detector[FIDUCIAL_STEREO_LEFT], left_cam) == FIDUCIAL_DETECTOR_OK &&
        fiducial_detector_set_camera_models(self->fiducial_detector[FIDUCIAL_STEREO_RIGHT], right_cam) == FIDUCIAL_DETECTOR_OK)
    {
        return FIDUCIAL_DETECTOR_OK;
    }
    return FIDUCIAL_DETECTOR_ERR;
}
/* Triangulate the fiducial's 3D position from the left/right image
 * detections and report the current best pose estimate in fd_pose.
 *
 * Preconditions: both detectors must have a projected fiducial
 * (fiducial_projected set), otherwise FIDUCIAL_DETECTOR_ERR is returned
 * and fd_pose is untouched.
 *
 * On any other path fd_pose always receives the current pose (initial pose,
 * with position replaced by the triangulated point on success).
 *
 * Returns FIDUCIAL_DETECTOR_OK when the triangulated point lies within
 * params.dist_thresh of the initial pose position; FIDUCIAL_DETECTOR_ERR
 * otherwise (including degenerate zero-disparity geometry, which the
 * original divided by, producing inf and failing the distance check —
 * now rejected explicitly). */
fiducial_detector_error_t fiducial_stereo_get_fiducial_pose(fiducial_stereo_t* self, fiducial_pose_t* fd_pose)
{
    fiducial_detector_t* left  = self->fiducial_detector[FIDUCIAL_STEREO_LEFT];
    fiducial_detector_t* right = self->fiducial_detector[FIDUCIAL_STEREO_RIGHT];
    if (!left->fiducial_projected || !right->fiducial_projected)
        return FIDUCIAL_DETECTOR_ERR;

    /* Start every pose from its initial estimate. */
    self->fiducial_pose  = self->initial_fiducial_pose;
    left->fiducial_pose  = left->initial_fiducial_pose;
    right->fiducial_pose = right->initial_fiducial_pose;

    const double disparity    = left->fiducial_location.x - right->fiducial_location.x;
    const double focal_length = left->camera.focal_length_x;
    const double baseline     = right->camera.transform[0][3];

    /* Degenerate geometry cannot be triangulated: report the initial pose
     * and fail, matching the behaviour of an out-of-threshold point. */
    if (disparity == 0.0 || focal_length == 0.0) {
        *fd_pose = self->fiducial_pose;
        return FIDUCIAL_DETECTOR_ERR;
    }

    /* Pinhole stereo triangulation in the left camera frame, then transform
     * into the world frame via the left camera transform. */
    fiducial_vec_t pw;
    pw.z = focal_length * baseline / disparity;
    pw.y = (-left->camera.image_center_y + left->fiducial_location.y) * pw.z / focal_length;
    pw.x = (-left->camera.image_center_x + left->fiducial_location.x) * pw.z / focal_length;
    pw = fiducial_vec_transform(fiducial_pose_from_transform(left->camera.transform), pw);

    /* Accept the point only if it is within dist_thresh of the initial guess
     * (original comment: "must be within 10cm"). */
    fiducial_detector_error_t status = FIDUCIAL_DETECTOR_ERR;
    if (fiducial_vec_mag(fiducial_vec_sub(pw, left->initial_fiducial_pose.pos)) < left->params.dist_thresh)
    {
        self->fiducial_pose.pos = pw;
        left->fiducial_pose.pos = pw;
        right->fiducial_pose.pos = pw;
        status = FIDUCIAL_DETECTOR_OK;
    }
    /* Struct assignment replaces the original's duplicated field-by-field
     * copies (fd_pose and self->fiducial_pose have the same type). */
    *fd_pose = self->fiducial_pose;
    return status;
}
/* Draw the current fiducial estimate into both camera images.
 *
 * left_image_data / right_image_data are image_cols x image_rows buffers
 * with image_channels interleaved channels. When OpenMP is available the
 * two cameras are drawn in parallel (one thread each).
 *
 * Returns FIDUCIAL_DETECTOR_OK if both draws succeed, else
 * FIDUCIAL_DETECTOR_ERR.
 *
 * BUG FIX: omp_set_num_threads(2) was called unconditionally, but this
 * file only includes <omp.h> under #ifdef _OPENMP, so non-OpenMP builds
 * failed to compile. It is now guarded, matching fiducial_stereo_process. */
fiducial_detector_error_t fiducial_stereo_draw_fiducials(fiducial_stereo_t* self,
                                                         uint8_t* left_image_data,
                                                         uint8_t* right_image_data,
                                                         int image_cols, int image_rows, int image_channels)
{
    int c = 0;
    /* Per-camera views of the arguments so the draw loop can be parallel. */
    uint8_t* image_data[FIDUCIAL_STEREO_NUM_CAMERAS] = {NULL};
    image_data[FIDUCIAL_STEREO_LEFT] = left_image_data;
    image_data[FIDUCIAL_STEREO_RIGHT] = right_image_data;
    fiducial_detector_error_t err[FIDUCIAL_STEREO_NUM_CAMERAS];
    int cols[FIDUCIAL_STEREO_NUM_CAMERAS];
    cols[FIDUCIAL_STEREO_LEFT] = image_cols;
    cols[FIDUCIAL_STEREO_RIGHT] = image_cols;
    int rows[FIDUCIAL_STEREO_NUM_CAMERAS];
    rows[FIDUCIAL_STEREO_LEFT] = image_rows;
    rows[FIDUCIAL_STEREO_RIGHT] = image_rows;
    int channels[FIDUCIAL_STEREO_NUM_CAMERAS];
    channels[FIDUCIAL_STEREO_LEFT] = image_channels;
    channels[FIDUCIAL_STEREO_RIGHT] = image_channels;
#ifdef _OPENMP
    /* One thread per camera. */
    omp_set_num_threads(2);
#pragma omp parallel for
#endif
    for (c = 0; c < FIDUCIAL_STEREO_NUM_CAMERAS; c++)
    {
        err[c] = fiducial_detector_draw_fiducial(self->fiducial_detector[c],
                                                 image_data[c],
                                                 cols[c], rows[c], channels[c]);
    }
    /* Errors are checked after the parallel region so both draws complete. */
    for (c = 0; c < FIDUCIAL_STEREO_NUM_CAMERAS; c++)
    {
        if (err[c] != FIDUCIAL_DETECTOR_OK)
        {
            return FIDUCIAL_DETECTOR_ERR;
        }
    }
    return FIDUCIAL_DETECTOR_OK;
}
/* Full stereo pipeline: match the fiducial template in both images
 * (in parallel when OpenMP is enabled), triangulate its 3D position, and
 * optionally refine a full 6DOF pose by gradient descent on the left image.
 *
 * Outputs: *found_pose (pose estimate), *left_score / *right_score
 * (per-camera match scores — written even when matching fails, so callers
 * can inspect them). Returns FIDUCIAL_DETECTOR_OK on success,
 * FIDUCIAL_DETECTOR_ERR if either match, the triangulation, or the
 * refinement fails. */
fiducial_detector_error_t fiducial_stereo_process(fiducial_stereo_t* self,
uint8_t* left_image_data,
uint8_t* right_image_data,
int image_cols, int image_rows, int image_channels,
fiducial_pose_t initial_pose,
fiducial_pose_t *found_pose,
float *left_score,
float *right_score,
bool full_pose)
{
int c = 0;
float score[FIDUCIAL_STEREO_NUM_CAMERAS] = {0.0};
// Setup initial pose;
self->initial_fiducial_pose = initial_pose;
fiducial_pose_t initial_fd_pose[FIDUCIAL_STEREO_NUM_CAMERAS];
initial_fd_pose[FIDUCIAL_STEREO_LEFT] = initial_pose;
initial_fd_pose[FIDUCIAL_STEREO_RIGHT] = initial_pose;
// Setup image data;
uint8_t* image_data[FIDUCIAL_STEREO_NUM_CAMERAS] = {NULL};
image_data[FIDUCIAL_STEREO_LEFT] = left_image_data;
image_data[FIDUCIAL_STEREO_RIGHT] = right_image_data;
// Setup error;
fiducial_detector_error_t err[FIDUCIAL_STEREO_NUM_CAMERAS];
// Setup image information (per-camera arrays so the match loop can run in parallel)
int cols[FIDUCIAL_STEREO_NUM_CAMERAS];
cols[FIDUCIAL_STEREO_LEFT] = image_cols;
cols[FIDUCIAL_STEREO_RIGHT] = image_cols;
int rows[FIDUCIAL_STEREO_NUM_CAMERAS];
rows[FIDUCIAL_STEREO_LEFT] = image_rows;
rows[FIDUCIAL_STEREO_RIGHT] = image_rows;
int channels[FIDUCIAL_STEREO_NUM_CAMERAS];
channels[FIDUCIAL_STEREO_LEFT] = image_channels;
channels[FIDUCIAL_STEREO_RIGHT] = image_channels;
#ifdef _OPENMP
// Setup num threads (one per camera)
omp_set_num_threads(2);
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(c = 0; c < FIDUCIAL_STEREO_NUM_CAMERAS; c++)
{
// Match in left and right images
err[c] = fiducial_detector_match(self->fiducial_detector[c], image_data[c], cols[c], rows[c], channels[c], initial_fd_pose[c], &score[c]);
}
// Scores are reported even if a match failed below.
*left_score = score[FIDUCIAL_STEREO_LEFT];
*right_score = score[FIDUCIAL_STEREO_RIGHT];
// Check errors
for(c = 0; c < FIDUCIAL_STEREO_NUM_CAMERAS; c++)
{
if( err[c] != FIDUCIAL_DETECTOR_OK )
{
return FIDUCIAL_DETECTOR_ERR;
}
}
// Get the fiducial pose based on LEFT and RIGHT images (position only - x,y,z)
if( fiducial_stereo_get_fiducial_pose(self, found_pose) != FIDUCIAL_DETECTOR_OK )
return FIDUCIAL_DETECTOR_ERR;
// Get a refined fiducial 6DOF pose via gradient descent (using left image)
// NOTE(review): max 100 iterations, step/termination constants 0.001 and
// 0.0025 are hard-coded here — confirm against fiducial_detector_gradient_descent.
if(full_pose)
{
if( fiducial_detector_gradient_descent(self->fiducial_detector[0], image_data[0], cols[0], rows[0], channels[0], 100, 0.001, 0.0025, found_pose, left_score) != FIDUCIAL_DETECTOR_OK )
return FIDUCIAL_DETECTOR_ERR;
// Set the right detector pose to the refined one
self->fiducial_detector[FIDUCIAL_STEREO_RIGHT]->fiducial_pose = *found_pose;
}
// Set the fiducial pose
self->fiducial_pose = *found_pose;
return FIDUCIAL_DETECTOR_OK;
}
|
openmp.h | /*
This file is part of Primer Pooler (c) Silas S. Brown. For Wen.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef OPENMP_H
#define OPENMP_H
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_num_threads() 1
#define omp_get_thread_num() 0
#define omp_set_num_threads(x) (void)0
#endif
/* Thin inline wrapper around memcpy.
 * Works around an Apple compiler bug in Mac OS 10.7: calling memcpy
 * directly from a function doing OpenMP work produced linker errors,
 * but routing the call through an inline wrapper links fine. */
static inline void* wrapped_memcpy(void *dst, const void *src, size_t n) {
  return memcpy(dst, src, n);
}
#include "random.h"
/* Per-thread random number: thread 0 uses plain rand(); every other
 * thread gets its own rand_r() state, lazily allocated on first use
 * inside a parallel region.
 *
 * NOTE(review): the outer `if (!states)` reads `states` without any
 * synchronization before entering the critical section — classic broken
 * double-checked locking, a data race if two non-zero threads arrive
 * simultaneously. Consider protecting the first check too (or using an
 * atomic pointer / omp flush) — confirm against the OpenMP memory model. */
static inline int ThreadRand() {
int tNum = omp_get_thread_num(); /* this might not be unique if ThreadRand is called from inside NESTED parallelism, but we don't do that */
if(!tNum) return rand(); /* thread 0 keeps the process-wide rand() stream */
static RandState *states = NULL;
if (!states) {
#ifdef _OPENMP
#pragma omp critical
#endif
if (!states) {
/* How many threads shall we leave room for?
omp_get_max_threads() works BEFORE you start a
parallel region, but not INSIDE it (it returns 1
or however many threads you can NOW start).
omp_get_num_procs() might not return a high
enough number if someone set more threads than
cores (not great for this application though).
omp_get_num_threads() does what we want, as long
as the parallel region has definitely started and
we won't increase it later. We've already
established that we're in the parallel region
via the above tNum test, so let's use that.
*/
int nStates = omp_get_num_threads()-1; /* thread 0 needs no slot */
states = malloc(sizeof(RandState)*nStates);
if(states) {
/* Seed each slot with consecutive values derived from one rand(). */
int r = rand(), i;
for(i=0; i<nStates; i++) states[i] = r++;
}
}
if(!states) /* aaaargh! */ return rand(); /* allocation failed: degrade to shared rand() */
} return rand_r(states+(tNum-1)); /* slot tNum-1 belongs to thread tNum */
}
#endif
|
explicit_solver_strategy.h | //
// Authors:
// Miguel Angel Celigueta maceli@cimne.upc.edu
// Miquel Santasusana msantasusana@cimne.upc.edu
//
#if !defined(KRATOS_EXPLICIT_SOLVER_STRATEGY)
#define KRATOS_EXPLICIT_SOLVER_STRATEGY
// Project includes
#include "utilities/timer.h"
#include "custom_elements/Particle_Contact_Element.h"
#include "includes/variables.h"
#include "includes/deprecated_variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
#include <time.h>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
#define CUSTOMTIMER 0 // ACTIVATES AND DISABLES ::TIMER:::::
#include "includes/define.h"
#include "utilities/openmp_utils.h"
#include "includes/model_part.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/schemes/scheme.h"
#include "custom_strategies/schemes/dem_integration_scheme.h"
#include "custom_utilities/create_and_destroy.h"
#include "custom_utilities/dem_fem_utilities.h"
#include "custom_utilities/GeometryFunctions.h"
#include "custom_utilities/inlet.h"
#include "custom_elements/cluster3D.h"
#include "custom_elements/rigid_body_element.h"
////Cfeng
#include "custom_utilities/dem_fem_search.h"
#include "custom_utilities/discrete_particle_configure.h"
#include "custom_utilities/rigid_face_geometrical_object_configure.h"
#ifdef USING_CGAL
#include <CGAL/spatial_sort.h>
#endif
/* Timer defines */
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos {
// Plain aggregate of the model parts an ExplicitSolverStrategy operates on.
// Ownership: holds raw, non-owning pointers; the caller keeps the
// ModelParts alive for the strategy's lifetime.
class ExplicitSolverSettings {
public:
KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverSettings);
ExplicitSolverSettings() {
}
~ExplicitSolverSettings() {
}
ModelPart* r_model_part;        // spheres (DEM) model part
ModelPart* contact_model_part;  // contact elements
ModelPart* fem_model_part;      // FEM walls
ModelPart* cluster_model_part;  // particle clusters
ModelPart* inlet_model_part;    // inlet conditions
};
// Explicit time-integration strategy for the DEM application: owns the
// model parts, the spatial searches, and the per-step pipeline
// (neighbour search, force computation, motion integration).
// Most virtual hooks are overridden by derived continuum/MPI strategies.
class KRATOS_API(DEM_APPLICATION) ExplicitSolverStrategy {
public:
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ElementsArrayType::iterator ElementsIterator;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
typedef ModelPart::ElementsContainerType::ContainerType ElementsContainerType;
typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
typedef SpatialSearch::ResultElementsContainerType ResultElementsContainerType;
typedef SpatialSearch::VectorResultElementsContainerType VectorResultElementsContainerType;
typedef SpatialSearch::RadiusArrayType RadiusArrayType;
typedef SpatialSearch::DistanceType DistanceType;
typedef SpatialSearch::VectorDistanceType VectorDistanceType;
typedef SpatialSearch::ResultConditionsContainerType ResultConditionsContainerType;
typedef SpatialSearch::VectorResultConditionsContainerType VectorResultConditionsContainerType;
typedef PointerVectorSet<Properties, IndexedObject> PropertiesContainerType;
typedef PropertiesContainerType::iterator PropertiesIterator;
typedef DiscreteParticleConfigure<3> ElementConfigureType;
typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType;
typedef Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3ul> > > ComponentOf3ComponentsVariableType;
/// Pointer definition of ExplicitSolverStrategy
KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverStrategy);
// Default constructor: leaves all pointers/settings uninitialized; only
// useful for serialization/derived-class construction.
ExplicitSolverStrategy() {
}
// Main constructor: caches the model parts from `settings` (validating
// each for NULL), the search/creator-destructor helpers, and the
// time-stepping parameters read from `strategy_parameters`.
ExplicitSolverStrategy(ExplicitSolverSettings& settings,
const double max_delta_time,
const int n_step_search,
const double safety_factor,
const int delta_option,
ParticleCreatorDestructor::Pointer p_creator_destructor,
DEM_FEM_Search::Pointer p_dem_fem_search,
SpatialSearch::Pointer pSpSearch,
Parameters strategy_parameters) {
mParameters = strategy_parameters;
mDeltaOption = delta_option;
mpParticleCreatorDestructor = p_creator_destructor;
mpDemFemSearch = p_dem_fem_search;
mpSpSearch = pSpSearch;
// Neighbour search can be disabled entirely from the input parameters.
if(mParameters["do_search_neighbours"].GetBool()) mDoSearchNeighbourElements = true;
else mDoSearchNeighbourElements = false;
p_creator_destructor->SetDoSearchNeighbourElements(mDoSearchNeighbourElements);
mMaxTimeStep = max_delta_time;
mNStepSearch = n_step_search;
mSafetyFactor = safety_factor;
// Each model part pointer is checked; a missing part is a fatal setup error.
mpDem_model_part = &(*(settings.r_model_part));
if (mpDem_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.r_model_part in ExplicitSolverStrategy constructor", "")
mpContact_model_part = &(*(settings.contact_model_part));
if (mpContact_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.contact_model_part in ExplicitSolverStrategy constructor", "")
mpFem_model_part = &(*(settings.fem_model_part));
if (mpFem_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.fem_model_part in ExplicitSolverStrategy constructor", "")
mpCluster_model_part = &(*(settings.cluster_model_part));
if (mpCluster_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.cluster_model_part in ExplicitSolverStrategy constructor", "")
mpInlet_model_part = &(*(settings.inlet_model_part));
if (mpInlet_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.inlet_model_part in ExplicitSolverStrategy constructor", "")
if(mParameters["RemoveBallsInitiallyTouchingWalls"].GetBool()) mRemoveBallsInitiallyTouchingWallsOption = true;
else mRemoveBallsInitiallyTouchingWallsOption = false;
}
/// Destructor.
virtual ~ExplicitSolverStrategy() {
//Timer::SetOuputFile("TimesPartialRelease");
//Timer::PrintTimingInformation();
}
// Comparators ordering particles by the x/y/z coordinate of their first
// geometry node; used as the CGAL spatial-sort traits below.
struct LessX {
bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[0] < q->GetGeometry()[0].Coordinates()[0];}
};
struct LessY {
bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[1] < q->GetGeometry()[0].Coordinates()[1];}
};
struct LessZ {
bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[2] < q->GetGeometry()[0].Coordinates()[2];}
};
struct SpatialSortingTraits {
typedef SphericParticle* Point_2;
typedef LessX Less_x_2;
typedef LessY Less_y_2;
typedef LessZ Less_z_2;
Less_x_2 less_x_2_object() const {return Less_x_2();}
Less_y_2 less_y_2_object() const {return Less_y_2();}
Less_z_2 less_z_2_object() const { return Less_z_2();}
};
#ifdef USING_CGAL
// Spatially sort the particle list (improves memory locality during
// neighbour loops); only available when built against CGAL.
void ReorderParticles() {
SpatialSortingTraits sst;
CGAL::spatial_sort(mListOfSphericParticles.begin(), mListOfSphericParticles.end(), sst);
}
#endif
// Rebuild rCustomListOfParticles as downcast (dynamic_cast to T*) views of
// pElements, in element order. Runs the cast loop in parallel; entries
// whose element is not a T become NULL pointers (dynamic_cast failure).
template <class T>
void RebuildListOfSphericParticles(ElementsArrayType& pElements, std::vector<T*>& rCustomListOfParticles){
KRATOS_TRY
rCustomListOfParticles.resize(pElements.size());
#pragma omp parallel for
for (int k = 0; k < (int)pElements.size(); k++){
ElementsArrayType::iterator particle_pointer_it = pElements.ptr_begin() + k;
T* spheric_particle = dynamic_cast<T*>(&(*particle_pointer_it));
rCustomListOfParticles[k] = spheric_particle;
}
return;
KRATOS_CATCH("")
}
void RebuildListOfDiscontinuumSphericParticles() {
RebuildListOfSphericParticles<SphericParticle>(GetModelPart().GetCommunicator().LocalMesh().Elements(), mListOfSphericParticles);
}
void RebuildPropertiesProxyPointers(std::vector<SphericParticle*>& rCustomListOfSphericParticles);
void SendProcessInfoToClustersModelPart();
void UpdateMaxIdOfCreatorDestructor();
void RepairPointersToNormalProperties(std::vector<SphericParticle*>& rCustomListOfSphericParticles);
virtual void Initialize();
virtual void AttachSpheresToStickyWalls();
virtual void DisplayThreadInfo();
virtual void CalculateMaxTimeStep();
double CalculateMaxInletTimeStep();
virtual void InitializeClusters();
virtual void GetClustersForce();
virtual void GetRigidBodyElementsForce();
virtual double SolveSolutionStep();
void SearchDEMOperations(ModelPart& r_model_part, bool has_mpi = true);
void SearchFEMOperations(ModelPart& r_model_part, bool has_mpi = true) ;
virtual void ForceOperations(ModelPart& r_model_part);
void InitialTimeStepCalculation(); //TODO: remove this one
void GetForce();
void FastGetForce();
virtual void PerformTimeIntegrationOfMotion(int StepFlag = 0);
void InitializeSolutionStep();
virtual void BoundingBoxUtility(bool is_time_to_mark_and_remove = true);
virtual void FinalizeSolutionStep();
void InitializeElements();
void InitializeDEMElements();
void InitializeFEMElements();
//void InitializeRigidBodyElements();
void InitializeFEMWallsAsRigidBodyElements(ModelPart::SubModelPartsContainerType::iterator& sub_model_part);
void MarkToDeleteAllSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart);
void ComputeNodalArea();
void ComputeNormalPressureVectorField();
virtual void CalculateConditionsRHSAndAdd();
void ClearFEMForces();
void CalculateNodalPressuresAndStressesOnWalls();
void SetFlagAndVariableToNodes(const Kratos::Flags& r_flag_name, ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
void SetVariableToNodes(ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
void ResetPrescribedMotionFlagsRespectingImposedDofs();
void ApplyPrescribedBoundaryConditions();
void ApplyInitialConditions();
void SetSearchRadiiOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
void SetNormalRadiiOnAllParticles(ModelPart& r_model_part);
void SetSearchRadiiWithFemOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
virtual void SearchNeighbours();
virtual void ComputeNewNeighboursHistoricalData();
virtual void CreateContactElements();
void InitializeContactElements();
// void ContactInitializeSolutionStep();
void PrepareContactElementsForPrinting();
virtual void ComputeNewRigidFaceNeighboursHistoricalData();
virtual void SearchRigidFaceNeighbours();
void CheckHierarchyWithCurrentNeighbours();
/* This should work only with one iteration, but it with mpi does not */
void CalculateInitialMaxIndentations(ProcessInfo& r_process_info);
void PrepareContactModelPart(ModelPart& r_model_part, ModelPart& mcontacts_model_part);
void PrepareElementsForPrinting();
void SynchronizeHistoricalVariables(ModelPart& r_model_part);
void SynchronizeRHS(ModelPart& r_model_part);
void CleanEnergies();
// Accessors: all return references to internally stored state; none
// transfer ownership.
ModelPart& GetModelPart() { return (*mpDem_model_part);}
ModelPart& GetFemModelPart() { return (*mpFem_model_part);}
ModelPart& GetContactModelPart() { return (*mpContact_model_part);}
ModelPart& GetClusterModelPart() { return (*mpCluster_model_part);}
ModelPart& GetInletModelPart() { return (*mpInlet_model_part);}
ModelPart& GetRigidBodyModelPart() { return (*mpRigidBody_model_part);}
VectorResultElementsContainerType& GetResults() { return (mResults);}
VectorDistanceType& GetResultsDistances() { return (mResultsDistances);}
RadiusArrayType& GetArrayOfAmplifiedRadii() { return (mArrayOfAmplifiedRadii);}
int& GetNStepSearch() { return (mNStepSearch);}
int& GetSearchControl() { return mSearchControl;}
int& GetNumberOfThreads() { return (mNumberOfThreads);}
double& GetMaxTimeStep() { return (mMaxTimeStep);}
double& GetSafetyFactor() { return (mSafetyFactor);}
int& GetDeltaOption() { return (mDeltaOption);}
std::vector<unsigned int>& GetElementPartition() { return (mElementPartition);}
ParticleCreatorDestructor::Pointer& GetParticleCreatorDestructor() { return (mpParticleCreatorDestructor);}
SpatialSearch::Pointer& GetSpSearch() { return (mpSpSearch);}
VectorResultConditionsContainerType& GetRigidFaceResults() { return (mRigidFaceResults);}
VectorDistanceType& GetRigidFaceResultsDistances() { return (mRigidFaceResultsDistances);}
std::vector<unsigned int>& GetConditionPartition() { return (mConditionPartition);}
DEM_FEM_Search::Pointer& GetDemFemSearch() { return (mpDemFemSearch);}
virtual ElementsArrayType& GetElements(ModelPart& r_model_part) { return r_model_part.GetCommunicator().LocalMesh().Elements();}
virtual ElementsArrayType& GetAllElements(ModelPart& r_model_part) {
return r_model_part.Elements();
}
protected:
Parameters mParameters;
bool mRemoveBallsInitiallyTouchingWallsOption;
VectorResultElementsContainerType mResults;
VectorDistanceType mResultsDistances;
RadiusArrayType mArrayOfAmplifiedRadii;
int mNStepSearch;
int mSearchControl;
int mNumberOfThreads;
double mMaxTimeStep;
double mSafetyFactor;
int mDeltaOption;
std::vector<unsigned int> mElementPartition;
ParticleCreatorDestructor::Pointer mpParticleCreatorDestructor;
DEM_FEM_Search::Pointer mpDemFemSearch;
SpatialSearch::Pointer mpSpSearch;
bool mDoSearchNeighbourElements;
VectorResultConditionsContainerType mRigidFaceResults;
VectorDistanceType mRigidFaceResultsDistances;
std::vector<unsigned int> mConditionPartition;
// Non-owning pointers to the model parts supplied at construction.
ModelPart *mpFem_model_part;
ModelPart *mpDem_model_part;
ModelPart *mpInlet_model_part;
ModelPart *mpContact_model_part;
ModelPart *mpCluster_model_part;
ModelPart *mpRigidBody_model_part;
std::vector<SphericParticle*> mListOfSphericParticles;
std::vector<SphericParticle*> mListOfGhostSphericParticles;
}; // Class ExplicitSolverStrategy
} // namespace Kratos.
#endif // KRATOS_EXPLICIT_SOLVER_STRATEGY defined
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP clause.
class OMPClause {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// Ending location of the clause.
  SourceLocation EndLoc;
  /// Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// Returns the starting location of the clause.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns the ending location of the clause.
  SourceLocation getEndLoc() const { return EndLoc; }
  /// Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }
  /// An implicit clause has no spelling in the source, so its start
  /// location is left invalid.
  bool isImplicit() const { return StartLoc.isInvalid(); }

  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Get the iterator range for the expressions used in the clauses. Used
  /// expressions include only the children that must be evaluated at the
  /// runtime before entering the construct.
  child_range used_children();
  const_child_range used_children() const {
    // BUG FIX: delegate to the non-const used_children(); the original
    // forwarded to children(), so the const overload returned ALL children
    // rather than only the used ones.
    auto Children = const_cast<OMPClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Pre-initialization statement for the clause.
Stmt *PreInit = nullptr;
/// Region that captures the associated stmt.
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown;
protected:
// The assert documents the mixin contract: every clause deriving from this
// class must be reachable through OMPClauseWithPreInit::get().
OMPClauseWithPreInit(const OMPClause *This) {
assert(get(This) && "get is not tuned for pre-init.");
}
/// Set pre-initialization statement for the clause.
void
setPreInitStmt(Stmt *S,
OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) {
PreInit = S;
CaptureRegion = ThisRegion;
}
public:
/// Get pre-initialization statement for the clause.
const Stmt *getPreInitStmt() const { return PreInit; }
/// Get pre-initialization statement for the clause.
Stmt *getPreInitStmt() { return PreInit; }
/// Get capture region for the stmt in the clause.
OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }
/// Downcast helpers: return the mixin view of \p C, or null if the clause
/// kind does not carry a pre-init statement.
static OMPClauseWithPreInit *get(OMPClause *C);
static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Post-update expression for the clause.
Expr *PostUpdate = nullptr;
protected:
OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
assert(get(This) && "get is not tuned for post-update.");
}
/// Set post-update expression for the clause.
/// (Original comment said "pre-initialization" — copy-paste error.)
void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
public:
/// Get post-update expression for the clause.
const Expr *getPostUpdateExpr() const { return PostUpdate; }
/// Get post-update expression for the clause.
Expr *getPostUpdateExpr() { return PostUpdate; }
/// Downcast helpers: return the mixin view of \p C, or null if the clause
/// kind does not carry a post-update expression.
static OMPClauseWithPostUpdate *get(OMPClause *C);
static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
/// Starting location of the clause (the clause keyword).
SourceLocation StartLoc;
/// Location of '('.
SourceLocation LParenLoc;
/// Ending location of the clause.
SourceLocation EndLoc;
OMPVarListLocTy() = default;
OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};
/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
///
/// CRTP base: \p T is the concrete clause type, which must store the
/// variable expressions as trailing objects (accessed below via
/// static_cast<T *>(this)->getTrailingObjects<Expr *>()).
template <class T> class OMPVarListClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of variables in the list.
unsigned NumVars;
protected:
/// Build a clause with \a N variables
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
: OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}
/// Fetches list of variables associated with this clause.
MutableArrayRef<Expr *> getVarRefs() {
return MutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
}
/// Sets the list of variables for this clause.
/// \pre VL.size() == NumVars (the trailing storage is preallocated).
void setVarRefs(ArrayRef<Expr *> VL) {
assert(VL.size() == NumVars &&
"Number of variables is not the same as the preallocated buffer");
std::copy(VL.begin(), VL.end(),
static_cast<T *>(this)->template getTrailingObjects<Expr *>());
}
public:
using varlist_iterator = MutableArrayRef<Expr *>::iterator;
using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
using varlist_range = llvm::iterator_range<varlist_iterator>;
using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVarRefs().begin(); }
varlist_iterator varlist_end() { return getVarRefs().end(); }
varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
varlist_const_iterator varlist_end() const { return getVarRefs().end(); }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Fetches list of all variables in the clause.
ArrayRef<const Expr *> getVarRefs() const {
return llvm::makeArrayRef(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
NumVars);
}
};
/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Expression with the allocator.
Stmt *Allocator = nullptr;
/// Set allocator.
void setAllocator(Expr *A) { Allocator = A; }
public:
/// Build 'allocator' clause with the given allocator.
///
/// \param A Allocator.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc),
LParenLoc(LParenLoc), Allocator(A) {}
/// Build an empty clause.
OMPAllocatorClause()
: OMPClause(llvm::omp::OMPC_allocator, SourceLocation(),
SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns allocator.
Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }
child_range children() { return child_range(&Allocator, &Allocator + 1); }
const_child_range children() const {
return const_child_range(&Allocator, &Allocator + 1);
}
// This clause has no expressions that must be evaluated before entering
// the construct, so used_children() is empty.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_allocator;
}
};
/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a', with the allocator
/// 'omp_default_mem_alloc' specified before the ':' delimiter.
///
/// The list of variable expressions is stored as trailing objects after the
/// clause itself (see OMPVarListClause).
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
  Expr *Allocator = nullptr;

  /// Position of the ':' delimiter in the clause.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc,
                                            LParenLoc, EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate,
                                            SourceLocation(), SourceLocation(),
                                            SourceLocation(), N) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the allocator expression.
  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc, Expr *Allocator,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Returns the allocator expression or nullptr, if no allocator is specified.
  Expr *getAllocator() const { return Allocator; }

  /// Returns the location of the ':' delimiter.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Children are the variable references from the trailing storage; the
  /// allocator expression is not included in this range.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPAllocateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Returns an empty range: no children are marked as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_allocate;
  }
};
/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond),
        ColonLoc(ColonLoc), NameModifier(NameModifier),
        NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPIfClause()
      : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  /// The only child is the condition expression.
  child_range children() { return child_range(&Condition, &Condition + 1); }
  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  /// Defined out of line; see the implementation file.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_if;
  }
};
/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, Stmt *HelperCond,
                 OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// The only child is the condition expression.
  child_range children() { return child_range(&Condition, &Condition + 1); }
  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  /// Defined out of line; see the implementation file.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPFinalClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_final;
  }
};
/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression for the number of threads requested by the clause.
  Stmt *NumThreads = nullptr;

  /// Set the number-of-threads expression.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with the expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  /// The only child is the number-of-threads expression.
  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  /// Returns an empty range: no children are marked as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_threads;
  }
};
/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  /// The only child is the safelen expression.
  child_range children() { return child_range(&Safelen, &Safelen + 1); }
  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  /// Returns an empty range: no children are marked as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_safelen;
  }
};
/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  /// The only child is the simdlen expression.
  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  /// Returns an empty range: no children are marked as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simdlen;
  }
};
/// Representation of the 'collapse' clause of a '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// Here '#pragma omp simd' carries a 'collapse' clause whose single
/// argument is '3'. The argument must be a constant positive integer
/// expression; it gives the number of nested loops to be collapsed into a
/// single iteration space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression for the number of associated for-loops.
  Stmt *NumForLoops = nullptr;

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build a 'collapse' clause with argument \a Num.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num) {}

  /// Build an empty clause with invalid locations.
  explicit OMPCollapseClause()
      : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(),
                  SourceLocation()) {}

  /// Set the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Get the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Get the expression for the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  /// The only child is the number-of-loops expression.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  /// Returns an empty range: no children are marked as used.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_collapse;
  }
};
/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'default' clause.
  llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPDefaultClause()
      : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  llvm::omp::DefaultKind getDefaultKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  /// The clause stores no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_default;
  }
};
/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'proc_bind' clause.
  llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPProcBindClause()
      : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  llvm::omp::ProcBindKind getProcBindKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  /// The clause stores no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_proc_bind;
  }
};
/// Clause 'unified_address' of the '#pragma omp requires' directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// Here '#pragma omp requires' carries the 'unified_address' clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build a 'unified_address' clause spanning [\a StartLoc, \a EndLoc].
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPUnifiedAddressClause()
      : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(),
                  SourceLocation()) {}

  /// The clause carries no expression arguments, so every child range is
  /// empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_unified_address;
  }
};
/// Clause 'unified_shared_memory' of the '#pragma omp requires' directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// Here '#pragma omp requires' carries the 'unified_shared_memory' clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build a 'unified_shared_memory' clause spanning [\a StartLoc, \a EndLoc].
  OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPUnifiedSharedMemoryClause()
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(),
                  SourceLocation()) {}

  /// The clause carries no expression arguments, so every child range is
  /// empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory;
  }
};
/// Clause 'reverse_offload' of the '#pragma omp requires' directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// Here '#pragma omp requires' carries the 'reverse_offload' clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build a 'reverse_offload' clause spanning [\a StartLoc, \a EndLoc].
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPReverseOffloadClause()
      : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(),
                  SourceLocation()) {}

  /// The clause carries no expression arguments, so every child range is
  /// empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reverse_offload;
  }
};
/// Clause 'dynamic_allocators' of the '#pragma omp requires' directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// Here '#pragma omp requires' carries the 'dynamic_allocators' clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build a 'dynamic_allocators' clause spanning [\a StartLoc, \a EndLoc].
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPDynamicAllocatorsClause()
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(),
                  SourceLocation()) {}

  /// The clause carries no expression arguments, so every child range is
  /// empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators;
  }
};
/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// 'atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }

  /// The clause stores no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
  }
};
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'schedule' clause.
OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;
/// Modifiers for 'schedule' clause.
enum {FIRST, SECOND, NUM_MODIFIERS};
OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];
/// Locations of modifiers.
SourceLocation ModifiersLoc[NUM_MODIFIERS];
  /// Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// Location of ',' (if any).
SourceLocation CommaLoc;
/// Chunk size.
Expr *ChunkSize = nullptr;
/// Set schedule kind.
///
/// \param K Schedule kind.
void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
/// Set the first schedule modifier.
///
/// \param M Schedule modifier.
void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[FIRST] = M;
}
/// Set the second schedule modifier.
///
/// \param M Schedule modifier.
void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[SECOND] = M;
}
/// Set location of the first schedule modifier.
void setFirstScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[FIRST] = Loc;
}
/// Set location of the second schedule modifier.
void setSecondScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[SECOND] = Loc;
}
/// Set schedule modifier location.
///
/// \param M Schedule modifier location.
void setScheduleModifer(OpenMPScheduleClauseModifier M) {
if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
Modifiers[FIRST] = M;
else {
assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
Modifiers[SECOND] = M;
}
}
/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// Build 'schedule' clause with schedule kind \a Kind and chunk size
/// expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind Schedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
/// \param M1 The first modifier applied to 'schedule' clause.
/// \param M1Loc Location of the first modifier
/// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier
OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, Stmt *HelperChunkSize,
OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
: OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
Modifiers[FIRST] = M1;
Modifiers[SECOND] = M2;
ModifiersLoc[FIRST] = M1Loc;
ModifiersLoc[SECOND] = M2Loc;
}
/// Build an empty clause.
explicit OMPScheduleClause()
: OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {
Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
}
/// Get kind of the clause.
OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
/// Get the first modifier of the clause.
OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
return Modifiers[FIRST];
}
/// Get the second modifier of the clause.
OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
return Modifiers[SECOND];
}
/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// Get kind location.
SourceLocation getScheduleKindLoc() { return KindLoc; }
/// Get the first modifier location.
SourceLocation getFirstScheduleModifierLoc() const {
return ModifiersLoc[FIRST];
}
/// Get the second modifier location.
SourceLocation getSecondScheduleModifierLoc() const {
return ModifiersLoc[SECOND];
}
/// Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }
/// Get chunk size.
Expr *getChunkSize() { return ChunkSize; }
/// Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPScheduleClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_schedule;
}
};
/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Number of for-loops, as written in the clause argument (may be null).
  Stmt *NumForLoops = nullptr;
  /// Real number of loops.
  unsigned NumberOfLoops = 0;
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}
  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}
  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
  /// Build an empty clause.
  static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
  /// Set number of iterations for the specified loop.
  /// (Stored in the tail-allocated Expr* array; defined out of line.)
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;
  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);
  /// Get loops counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;
  // The only child is the clause argument expression (possibly null).
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_ordered;
  }
};
/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nowait;
  }
};
/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPUntiedClause()
      : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {}
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_untied;
  }
};
/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPMergeableClause()
      : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
                  SourceLocation()) {}
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_mergeable;
  }
};
/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPReadClause()
      : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_read;
  }
};
/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_write;
  }
};
/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
/// dependence kind.
class OMPUpdateClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
                                    OpenMPDependClauseKind> {
  friend class OMPClauseReader;
  friend TrailingObjects;
  /// true if extended version of the clause for 'depobj' directive.
  bool IsExtended = false;
  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
    // 2 locations: for '(' and argument location.
    return IsExtended ? 2 : 0;
  }
  /// Sets the location of '(' in clause for 'depobj' directive.
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }
  /// Sets the location of the argument in clause for 'depobj' directive.
  void setArgumentLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
  }
  /// Sets the dependence kind for the clause for 'depobj' directive.
  void setDependencyKind(OpenMPDependClauseKind DK) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<OpenMPDependClauseKind>() = DK;
  }
  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
                  bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
        IsExtended(IsExtended) {}
  /// Build an empty clause.
  OMPUpdateClause(bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
        IsExtended(IsExtended) {}
public:
  /// Creates clause for 'atomic' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc);
  /// Creates clause for 'depobj' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ArgumentLoc Location of the argument.
  /// \param DK Dependence kind.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ArgumentLoc,
                                 OpenMPDependClauseKind DK,
                                 SourceLocation EndLoc);
  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param IsExtended true if extended clause for 'depobj' directive must be
  /// created.
  static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);
  /// Checks if the clause is the extended clauses for 'depobj' directive.
  bool isExtended() const { return IsExtended; }
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }
  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }
  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};
/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) {
  }
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_capture;
  }
};
/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) {
  }
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
  }
};
/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) {
  }
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};
/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPAcquireClause()
      : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) {
  }
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acquire;
  }
};
/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build 'release' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPReleaseClause()
      : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) {
  }
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_release;
  }
};
/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build 'relaxed' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPRelaxedClause()
      : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) {
  }
  // The clause has no associated expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_relaxed;
  }
};
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;
  // Tail-allocated storage: the variable list (N expressions, managed by
  // OMPVarListClause) immediately followed by N references to private copies
  // (see getPrivateCopies, which starts at varlist_end()).
  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
                                           LParenLoc, EndLoc, N) {}
  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), N) {}
  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);
  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);
  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;
  /// Iterates over the private-copy expressions (one per listed variable).
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }
  // Children are the expressions of the variable list only.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_private;
  }
};
/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
: public OMPVarListClause<OMPFirstprivateClause>,
public OMPClauseWithPreInit,
private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate,
StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPreInit(this) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPreInit(this) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new
/// private variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new
/// private variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
/// \param PrivateVL List of references to private copies with initializers.
/// \param InitVL List of references to auto generated variables used for
/// initialization of a single array element. Used if firstprivate variable is
/// of array type.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
static OMPFirstprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL, Stmt *PreInit);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
/// Returns an iterable range over the private copies of the variables.
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
/// Const overload of private_copies().
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
// Iterator/range typedefs over the tail-allocated initializers array.
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
/// Returns an iterable range over the initializer expressions.
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
/// Const overload of inits().
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
/// Returns all variable references in the clause as statement children.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; delegates to the non-const children().
const_child_range children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// All variables in a firstprivate clause are used (read) on entry to the
/// region, so the full variable list is reported as used children.
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; delegates to the non-const used_children().
const_child_range used_children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
/// RTTI support: true if \a T is a firstprivate clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
}
};
/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
// There are 4 additional tail-allocated arrays at the end of the class:
// 1. Contains list of pseudo variables with the default initialization for
// each non-firstprivate variables. Used in codegen for initialization of
// lastprivate copies.
// 2. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents private variables
// (for arrays, single array element).
// 3. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents original variables
// (for arrays, single array element).
// 4. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of final assignment performed by the
// lastprivate clause.
// NOTE: the getters below locate each array by chaining off the previous
// array's end(), so the order of the getters mirrors the layout above.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Optional lastprivate kind, e.g. 'conditional', if specified by user.
OpenMPLastprivateModifier LPKind;
/// Optional location of the lastprivate kind, if specified by user.
SourceLocation LPKindLoc;
/// Optional colon location, if specified by user.
SourceLocation ColonLoc;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
unsigned N)
: OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Get the list of helper expressions for initialization of private
/// copies for lastprivate variables. First tail array, directly after the
/// variable list.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
/// Const overload of getPrivateCopies().
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions. Second tail array.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
/// Const overload of getSourceExprs().
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent original variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions. Third tail array.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
/// Const overload of getDestinationExprs().
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign private copy of the variable to original variable.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions. Fourth tail array.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
/// Const overload of getAssignmentOps().
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
/// Sets lastprivate kind.
void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
/// Sets location of the lastprivate kind.
void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
/// Sets colon symbol location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// private variables (for arrays, single array element).
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// original variables (for arrays, single array element).
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// lastprivate clause.
/// \param LPKind Lastprivate kind, e.g. 'conditional'.
/// \param LPKindLoc Location of the lastprivate kind.
/// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Lastprivate kind.
OpenMPLastprivateModifier getKind() const { return LPKind; }
/// Returns the location of the lastprivate kind.
SourceLocation getKindLoc() const { return LPKindLoc; }
/// Returns the location of the ':' symbol, if any.
SourceLocation getColonLoc() const { return ColonLoc; }
// Iterator/range typedefs shared by all helper-expression accessors below.
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Set list of helper expressions, required for generation of private
/// copies of original lastprivate variables.
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);
/// Returns an iterable range over the private copies.
helper_expr_const_range private_copies() const {
return helper_expr_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_range private_copies() {
return helper_expr_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
/// Returns an iterable range over the helper source expressions.
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
/// Returns an iterable range over the helper destination expressions.
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
/// Returns an iterable range over the helper assignment expressions.
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
/// Returns all variable references in the clause as statement children.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; delegates to the non-const children().
const_child_range children() const {
auto Children = const_cast<OMPLastprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Lastprivate clause reports no used children: returns an empty range.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \a T is a lastprivate clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_lastprivate;
}
};
/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
: public OMPVarListClause<OMPSharedClause>,
private llvm::TrailingObjects<OMPSharedClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPSharedClause(unsigned N)
: OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Returns all variable references in the clause as statement children.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; delegates to the non-const children().
const_child_range children() const {
auto Children = const_cast<OMPSharedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Shared clause reports no used children: returns an empty range.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \a T is a shared clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_shared;
}
};
/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
: public OMPVarListClause<OMPReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Reduction modifier.
OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown;
/// Reduction modifier location.
SourceLocation ModifierLoc;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
OpenMPReductionClauseModifier Modifier, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPReductionClause(unsigned N)
: OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets reduction modifier.
void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; }
/// Sets location of the modifier.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private copy of the reduction
/// variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates. First tail array, directly after the
/// variable list; each subsequent getter chains off the previous array's
/// end(), so the getter order defines the tail-allocation layout.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
/// Const overload of getPrivates().
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent LHS expression in the final
/// reduction expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
/// Const overload of getLHSExprs().
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent RHS expression in the final
/// reduction expression performed by the reduction clause.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
/// Const overload of getRHSExprs().
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction call that calculates new value from source
/// helper expressions to destination helper expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
/// Const overload of getReductionOps().
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
/// Set list of helper copy operations for inscan reductions.
/// The form is: Temps[i] = LHS[i];
void setInscanCopyOps(ArrayRef<Expr *> Ops);
/// Get the list of helper inscan copy operations.
MutableArrayRef<Expr *> getInscanCopyOps() {
return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
/// Const overload of getInscanCopyOps().
ArrayRef<const Expr *> getInscanCopyOps() const {
return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
}
/// Set list of helper temp vars for inscan copy array operations.
void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);
/// Get the list of helper inscan copy temps.
MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
}
/// Const overload of getInscanCopyArrayTemps().
ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
}
/// Set list of helper temp elements vars for inscan copy array operations.
void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);
/// Get the list of helper inscan copy array elements.
MutableArrayRef<Expr *> getInscanCopyArrayElems() {
return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
varlist_size());
}
/// Const overload of getInscanCopyArrayElems().
ArrayRef<const Expr *> getInscanCopyArrayElems() const {
return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for reduction clause. This list represents
/// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for reduction clause. This list represents
/// RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CustomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param CopyOps List of copy operations for inscan reductions:
/// \code
/// TempExprs = LHSExprs;
/// \endcode
/// \param CopyArrayTemps Temp arrays for prefix sums.
/// \param CopyArrayElems Temp arrays for prefix sums.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier,
ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param Modifier Reduction modifier.
static OMPReductionClause *
CreateEmpty(const ASTContext &C, unsigned N,
OpenMPReductionClauseModifier Modifier);
/// Returns modifier.
OpenMPReductionClauseModifier getModifier() const { return Modifier; }
/// Returns modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
// Iterator/range typedefs shared by all helper-expression accessors below.
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Returns an iterable range over the helper privates.
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
/// Returns an iterable range over the helper LHS expressions.
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
/// Returns an iterable range over the helper RHS expressions.
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
/// Returns an iterable range over the helper reduction expressions.
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
/// Returns an iterable range over the inscan copy operations.
helper_expr_const_range copy_ops() const {
return helper_expr_const_range(getInscanCopyOps().begin(),
getInscanCopyOps().end());
}
helper_expr_range copy_ops() {
return helper_expr_range(getInscanCopyOps().begin(),
getInscanCopyOps().end());
}
/// Returns an iterable range over the inscan copy array temps.
helper_expr_const_range copy_array_temps() const {
return helper_expr_const_range(getInscanCopyArrayTemps().begin(),
getInscanCopyArrayTemps().end());
}
helper_expr_range copy_array_temps() {
return helper_expr_range(getInscanCopyArrayTemps().begin(),
getInscanCopyArrayTemps().end());
}
/// Returns an iterable range over the inscan copy array elements.
helper_expr_const_range copy_array_elems() const {
return helper_expr_const_range(getInscanCopyArrayElems().begin(),
getInscanCopyArrayElems().end());
}
helper_expr_range copy_array_elems() {
return helper_expr_range(getInscanCopyArrayElems().begin(),
getInscanCopyArrayElems().end());
}
/// Returns all variable references in the clause as statement children.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; delegates to the non-const children().
const_child_range children() const {
auto Children = const_cast<OMPReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Reduction clause reports the full variable list as used children.
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; delegates to the non-const used_children().
const_child_range used_children() const {
auto Children = const_cast<OMPReductionClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
/// RTTI support: true if \a T is a reduction clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_reduction;
}
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
: public OMPVarListClause<OMPTaskReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPTaskReductionClause>(
llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPTaskReductionClause(unsigned N)
: OMPVarListClause<OMPTaskReductionClause>(
llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent private copy of the reduction variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates. First tail array, directly after the
/// variable list; each subsequent getter chains off the previous array's
/// end(), so the getter order defines the tail-allocation layout.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
/// Const overload of getPrivates().
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent LHS expression in the final reduction
/// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
/// Const overload of getLHSExprs().
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent RHS expression in the final reduction
/// expression performed by the reduction clause. Also, variables in these
/// expressions are used for proper initialization of reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
/// Const overload of getRHSExprs().
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction call that calculates new value from source
/// helper expressions to destination helper expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
/// Const overload of getReductionOps().
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for task_reduction clause. This list
/// represents LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for task_reduction clause. This list
/// represents RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CustomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPTaskReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
// Iterator/range typedefs shared by all helper-expression accessors below.
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Returns an iterable range over the helper privates.
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
/// Returns an iterable range over the helper LHS expressions.
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
/// Returns an iterable range over the helper RHS expressions.
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
/// Returns an iterable range over the helper reduction expressions.
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
/// Returns all variable references in the clause as statement children.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; delegates to the non-const children().
const_child_range children() const {
auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Task_reduction clause reports no used children: returns an empty range.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \a T is a task_reduction clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
}
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  // Trailing storage layout: five helper arrays of varlist_size() elements
  // each are tail-allocated directly after the variable list, in this order:
  //   { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
  //     ReductionOps[]; TaskgroupDescriptors[]; }
  // Each get*() accessor below begins where the previous array ends, so the
  // ordering of these accessors mirrors the physical layout.

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    // First tail array: starts immediately after the variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }
  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No sub-expressions of this clause are evaluated inside the construct.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
  }
};
/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the linear step for clause.
  /// The step lives in the first helper slot directly after Finals[].
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// Sets the expression to calculate linear step for clause.
  /// The calc-step lives in the second helper slot after Finals[].
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Location of the linear modifier, if any.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
                                          LParenLoc, EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  ///   Finals[]; Step; CalcStep; UsedExprs[]; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Sets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Sets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Gets the list of used expressions for linear variables.
  /// The `+ 2` skips the Step and CalcStep slots stored after Finals[]; the
  /// used-expressions list holds one extra element beyond the variable count.
  MutableArrayRef<Expr *> getUsedExprs() {
    return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
  }
  ArrayRef<const Expr *> getUsedExprs() const {
    return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

  /// Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }

  /// Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }

  /// Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }

  /// Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  /// Sets the list of used expressions for the linear clause.
  void setUsedExprs(ArrayRef<Expr *> UE);

  using privates_iterator = MutableArrayRef<Expr *>::iterator;
  using privates_const_iterator = ArrayRef<const Expr *>::iterator;
  using privates_range = llvm::iterator_range<privates_iterator>;
  using privates_const_range = llvm::iterator_range<privates_const_iterator>;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  using updates_iterator = MutableArrayRef<Expr *>::iterator;
  using updates_const_iterator = ArrayRef<const Expr *>::iterator;
  using updates_range = llvm::iterator_range<updates_iterator>;
  using updates_const_range = llvm::iterator_range<updates_const_iterator>;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  using finals_iterator = MutableArrayRef<Expr *>::iterator;
  using finals_const_iterator = ArrayRef<const Expr *>::iterator;
  using finals_range = llvm::iterator_range<finals_iterator>;
  using finals_const_range = llvm::iterator_range<finals_const_iterator>;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
  using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
  using used_expressions_range =
      llvm::iterator_range<used_expressions_iterator>;
  using used_expressions_const_range =
      llvm::iterator_range<used_expressions_const_iterator>;

  // Fixed: previously constructed finals_range/finals_const_range here
  // (identical aliases, so no behavior change, but misleading to readers).
  used_expressions_range used_expressions() {
    return used_expressions_range(getUsedExprs().begin(), getUsedExprs().end());
  }
  used_expressions_const_range used_expressions() const {
    return used_expressions_const_range(getUsedExprs().begin(),
                                        getUsedExprs().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Defined out-of-line: linear-clause sub-expressions used inside the
  /// construct depend on the contained expressions.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_linear;
  }
};
/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  // Trailing storage layout: a single extra Expr* slot holding the alignment
  // expression is tail-allocated directly after the variable list, i.e.
  //   { Vars[] /* in OMPVarListClause */; Alignment; }

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for clause.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
                                           LParenLoc, EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment.
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPAlignedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No sub-expressions of this clause are evaluated inside the construct.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_aligned;
  }
};
/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays (each of varlist_size()
  // elements, stored directly after the variable list in this order):
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of those variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    // First tail array: starts immediately after the variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of those variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPCopyinClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No sub-expressions of this clause are evaluated inside the construct.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyin;
  }
};
/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
: public OMPVarListClause<OMPCopyprivateClause>,
private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
// Trailing storage layout (each segment holds varlist_size() Expr*):
// [variables][source exprs][destination exprs][assignment ops].
// The private accessors below slice that storage by successive offsets
// starting at varlist_end().
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
StartLoc, LParenLoc, EndLoc, N) {
}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
/// (First trailing segment, directly after the variable list.)
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
/// (Second trailing segment, directly after the source expressions.)
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
/// (Third trailing segment, directly after the destination expressions.)
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
// Children: the variable references in the clause's list.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// No children are marked as "used" by this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
}
};
/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
: public OMPVarListClause<OMPFlushClause>,
private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
: OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
// Children: the variable references in the flush list.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFlushClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// No children are marked as "used" by this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_flush;
}
};
/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// The depobj expression associated with the clause.
/// (The previous comment, "Chunk size.", was a copy-paste error.)
Expr *Depobj = nullptr;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
LParenLoc(LParenLoc) {}
/// Build an empty clause.
///
explicit OMPDepobjClause()
: OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {}
/// Sets the depobj expression (used by the clause reader).
void setDepobj(Expr *E) { Depobj = E; }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
public:
/// Creates clause.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Depobj depobj expression associated with the 'depobj' directive.
static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, Expr *Depobj);
/// Creates an empty clause.
///
/// \param C AST context.
static OMPDepobjClause *CreateEmpty(const ASTContext &C);
/// Returns depobj expression associated with the clause.
Expr *getDepobj() { return Depobj; }
const Expr *getDepobj() const { return Depobj; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
// Children: exactly the single depobj expression.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&Depobj),
reinterpret_cast<Stmt **>(&Depobj) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPDepobjClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// No children are marked as "used" by this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_depobj;
}
};
/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
: public OMPVarListClause<OMPDependClause>,
private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
/// Dependency type location.
SourceLocation DepLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
LParenLoc, EndLoc, N),
NumLoops(NumLoops) {}
/// Build an empty clause.
///
/// \param N Number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
SourceLocation(), SourceLocation(),
SourceLocation(), N),
NumLoops(NumLoops) {}
/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// Set dependency kind and its location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Sets optional dependency modifier.
/// (Defined out of line; storage location not visible in this header.)
void setModifier(Expr *DepModifier);
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, Expr *DepModifier,
OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VL, unsigned NumLoops);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops);
/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// Return optional depend modifier.
Expr *getModifier();
const Expr *getModifier() const {
return const_cast<OMPDependClause *>(this)->getModifier();
}
/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }
/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);
/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;
// Children: the variable references in the depend list.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPDependClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// No children are marked as "used" by this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_depend;
}
};
/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Device clause modifier.
OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;
/// Location of the modifier.
SourceLocation ModifierLoc;
/// Device number.
Stmt *Device = nullptr;
/// Set the device number.
///
/// \param E Device number.
void setDevice(Expr *E) { Device = E; }
/// Sets modifier.
void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }
/// Sets modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
public:
/// Build 'device' clause.
///
/// \param Modifier Clause modifier.
/// \param E Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param ModifierLoc Modifier location.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ModifierLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
ModifierLoc(ModifierLoc), Device(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPDeviceClause()
: OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return device number.
Expr *getDevice() { return cast<Expr>(Device); }
/// Return device number.
/// NOTE(review): const overload returns a non-const Expr* — confirm whether
/// callers rely on this before changing it to `const Expr *`.
Expr *getDevice() const { return cast<Expr>(Device); }
/// Gets modifier.
OpenMPDeviceClauseModifier getModifier() const { return Modifier; }
/// Gets modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
// Children: exactly the single device-number expression.
child_range children() { return child_range(&Device, &Device + 1); }
const_child_range children() const {
return const_child_range(&Device, &Device + 1);
}
// No children are marked as "used" by this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_device;
}
};
/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
/// Build 'threads' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPThreadsClause()
: OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) {
}
// This is a marker clause with no associated expressions, so both child
// ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_threads;
}
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
/// Build 'simd' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSIMDClause()
: OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}
// This is a marker clause with no associated expressions, so both child
// ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
// LLVM-style RTTI.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_simd;
}
};
/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
/// Class that represents a component of a mappable expression. E.g.
/// for an expression S.a, the first component is a declaration reference
/// expression associated with 'S' and the second is a member expression
/// associated with the field declaration 'a'. If the expression is an array
/// subscript it may not have any associated declaration. In that case the
/// associated declaration is set to nullptr.
class MappableComponent {
/// Expression associated with the component.
Expr *AssociatedExpression = nullptr;
/// Declaration associated with the declaration. If the component does
/// not have a declaration (e.g. array subscripts or section), this is set
/// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr;
public:
explicit MappableComponent() = default;
// The declaration is canonicalized on construction so that components
// referring to the same entity compare equal by pointer.
explicit MappableComponent(Expr *AssociatedExpression,
ValueDecl *AssociatedDeclaration)
: AssociatedExpression(AssociatedExpression),
AssociatedDeclaration(
AssociatedDeclaration
? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
: nullptr) {}
Expr *getAssociatedExpression() const { return AssociatedExpression; }
ValueDecl *getAssociatedDeclaration() const {
return AssociatedDeclaration;
}
};
// List of components of an expression. This first one is the whole
// expression and the last one is the base expression.
using MappableExprComponentList = SmallVector<MappableComponent, 8>;
using MappableExprComponentListRef = ArrayRef<MappableComponent>;
// List of all component lists associated to the same base declaration.
// E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
// their component list but the same base declaration 'S'.
using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;
protected:
// Helpers for derived clause classes to size their trailing storage.
// Return the total number of elements in a list of component lists.
static unsigned
getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);
// Return the total number of elements in a list of declarations. All
// declarations are expected to be canonical.
static unsigned
getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
/// This structure contains all sizes needed for by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
/// Number of expressions listed.
unsigned NumVars = 0;
/// Number of unique base declarations.
unsigned NumUniqueDeclarations = 0;
/// Number of component lists.
unsigned NumComponentLists = 0;
/// Total number of expression components.
unsigned NumComponents = 0;
/// Default constructor now zero-initializes all counters via the member
/// initializers above; previously `= default` left them indeterminate, so
/// reading any counter before assignment was undefined behavior.
OMPMappableExprListSizeTy() = default;
OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};
/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
public OMPClauseMappableExprCommon {
friend class OMPClauseReader;
/// Number of unique declarations in this clause.
unsigned NumUniqueDeclarations;
/// Number of component lists in this clause.
unsigned NumComponentLists;
/// Total number of components in this clause.
unsigned NumComponents;
/// Whether this clause is possible to have user-defined mappers associated.
/// It should be true for map, to, and from clauses, and false for
/// use_device_ptr and is_device_ptr.
const bool SupportsMapper;
/// C++ nested name specifier for the associated user-defined mapper.
NestedNameSpecifierLoc MapperQualifierLoc;
/// The associated user-defined mapper identifier information.
DeclarationNameInfo MapperIdInfo;
protected:
/// Build a clause for \a NumUniqueDeclarations declarations, \a
/// NumComponentLists total component lists, and \a NumComponents total
/// components.
///
/// \param K Kind of the clause.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
/// \param SupportsMapper Indicates whether this clause is possible to have
/// user-defined mappers associated.
/// \param MapperQualifierLocPtr C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
OMPMappableExprListClause(
OpenMPClauseKind K, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false,
NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
DeclarationNameInfo *MapperIdInfoPtr = nullptr)
: OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
Sizes.NumVars),
NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
NumComponentLists(Sizes.NumComponentLists),
NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) {
// The mapper qualifier/name are optional; copy them only when provided.
if (MapperQualifierLocPtr)
MapperQualifierLoc = *MapperQualifierLocPtr;
if (MapperIdInfoPtr)
MapperIdInfo = *MapperIdInfoPtr;
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
return MutableArrayRef<ValueDecl *>(
static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
return ArrayRef<ValueDecl *>(
static_cast<const T *>(this)
->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Set the unique declarations that are in the trailing objects of the
/// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
assert(UDs.size() == NumUniqueDeclarations &&
"Unexpected amount of unique declarations.");
std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
MutableArrayRef<unsigned> getDeclNumListsRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
ArrayRef<unsigned> getDeclNumListsRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Set the number of lists per declaration that are in the trailing
/// objects of the class.
void setDeclNumLists(ArrayRef<unsigned> DNLs) {
assert(DNLs.size() == NumUniqueDeclarations &&
"Unexpected amount of list numbers.");
std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
MutableArrayRef<unsigned> getComponentListSizesRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
ArrayRef<unsigned> getComponentListSizesRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Set the cumulative component lists sizes that are in the trailing
/// objects of the class.
void setComponentListSizes(ArrayRef<unsigned> CLSs) {
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of component lists.");
std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
}
/// Get the components that are in the trailing objects of the class.
MutableArrayRef<MappableComponent> getComponentsRef() {
return MutableArrayRef<MappableComponent>(
static_cast<T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Get the components that are in the trailing objects of the class.
ArrayRef<MappableComponent> getComponentsRef() const {
return ArrayRef<MappableComponent>(
static_cast<const T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Set the components that are in the trailing objects of the class.
/// This requires the list sizes so that it can also fill the original
/// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
ArrayRef<unsigned> CLSs) {
assert(Components.size() == NumComponents &&
"Unexpected amount of component lists.");
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of list sizes.");
// CLSs is only used for the size sanity check; the component storage
// itself is flat, so a single contiguous copy suffices.
std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
}
/// Fill the clause information from the list of declarations and
/// associated component lists.
void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
// Perform some checks to make sure the data sizes are consistent with the
// information available when the clause was created.
assert(getUniqueDeclarationsTotalNumber(Declarations) ==
NumUniqueDeclarations &&
"Unexpected number of mappable expression info entries!");
assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
"Unexpected total number of components!");
assert(Declarations.size() == ComponentLists.size() &&
"Declaration and component lists size is not consistent!");
assert(Declarations.size() == NumComponentLists &&
"Unexpected declaration and component lists size!");
// Organize the components by declaration and retrieve the original
// expression. Original expressions are always the first component of the
// mappable component list.
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
ComponentListMap;
{
// Walk the parallel Declarations/ComponentLists arrays in lockstep,
// grouping component lists under their (possibly repeated) declaration.
auto CI = ComponentLists.begin();
for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
++DI, ++CI) {
assert(!CI->empty() && "Invalid component list!");
ComponentListMap[*DI].push_back(*CI);
}
}
// Iterators of the target storage.
auto UniqueDeclarations = getUniqueDeclsRef();
auto UDI = UniqueDeclarations.begin();
auto DeclNumLists = getDeclNumListsRef();
auto DNLI = DeclNumLists.begin();
auto ComponentListSizes = getComponentListSizesRef();
auto CLSI = ComponentListSizes.begin();
auto Components = getComponentsRef();
auto CI = Components.begin();
// Variable to compute the accumulation of the number of components.
unsigned PrevSize = 0u;
// Scan all the declarations and associated component lists.
for (auto &M : ComponentListMap) {
// The declaration.
auto *D = M.first;
// The component lists.
auto CL = M.second;
// Initialize the entry.
*UDI = D;
++UDI;
*DNLI = CL.size();
++DNLI;
// Obtain the cumulative sizes and concatenate all the components in the
// reserved storage.
for (auto C : CL) {
// Accumulate with the previous size.
PrevSize += C.size();
// Save the size.
// Note: sizes stored here are cumulative across ALL lists, not
// per-list lengths; consumers recover list boundaries by differencing.
*CLSI = PrevSize;
++CLSI;
// Append components after the current components iterator.
CI = std::copy(C.begin(), C.end(), CI);
}
}
}
/// Set the nested name specifier of associated user-defined mapper.
void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
MapperQualifierLoc = NNSL;
}
/// Set the name of associated user-defined mapper.
void setMapperIdInfo(DeclarationNameInfo MapperId) {
MapperIdInfo = MapperId;
}
/// Get the user-defined mapper references that are in the trailing objects of
/// the class. They are stored immediately after the varlist expressions,
/// one mapper reference per listed variable (both segments have
/// varlist_size() elements).
MutableArrayRef<Expr *> getUDMapperRefs() {
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
return llvm::makeMutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Get the user-defined mappers references that are in the trailing objects
/// of the class.
ArrayRef<Expr *> getUDMapperRefs() const {
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
return llvm::makeArrayRef<Expr *>(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Set the user-defined mappers that are in the trailing objects of the
/// class. \p DMDs must contain exactly one entry per listed variable.
void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
"Unexpected number of user-defined mappers.");
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
}
public:
/// Return the number of unique base declarations in this clause.
unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
/// Return the number of component lists derived from the clause expressions.
unsigned getTotalComponentListNum() const { return NumComponentLists; }
/// Return the total number of components in all lists derived from the
/// clause.
unsigned getTotalComponentsNum() const { return NumComponents; }
/// Gets the nested name specifier for the associated user-defined mapper.
NestedNameSpecifierLoc getMapperQualifierLoc() const {
return MapperQualifierLoc;
}
/// Gets the name info for the associated user-defined mapper.
const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }
/// Iterator that browses the components by lists. It also allows
/// browsing components of a single declaration.
///
/// Dereferencing yields (declaration, component list, mapper) tuples. The
/// component lists are reconstructed from cumulative list sizes: the extent
/// of the current list is the difference between the current cumulative size
/// and the previous one.
class const_component_lists_iterator
: public llvm::iterator_adaptor_base<
const_component_lists_iterator,
MappableExprComponentListRef::const_iterator,
std::forward_iterator_tag, MappableComponent, ptrdiff_t,
MappableComponent, MappableComponent> {
// The declaration the iterator currently refers to.
ArrayRef<ValueDecl *>::iterator DeclCur;
// The list number associated with the current declaration.
ArrayRef<unsigned>::iterator NumListsCur;
// Whether this clause is possible to have user-defined mappers associated.
const bool SupportsMapper;
// The user-defined mapper associated with the current declaration.
ArrayRef<Expr *>::iterator MapperCur;
// Remaining lists for the current declaration.
unsigned RemainingLists = 0;
// The cumulative size of the previous list, or zero if there is no previous
// list.
unsigned PrevListSize = 0;
// The cumulative sizes of the current list - it will delimit the remaining
// range of interest.
ArrayRef<unsigned>::const_iterator ListSizeCur;
ArrayRef<unsigned>::const_iterator ListSizeEnd;
// Iterator to the end of the components storage.
MappableExprComponentListRef::const_iterator End;
public:
/// Construct an iterator that scans all lists.
explicit const_component_lists_iterator(
ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components, bool SupportsMapper,
ArrayRef<Expr *> Mappers)
: const_component_lists_iterator::iterator_adaptor_base(
Components.begin()),
DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
SupportsMapper(SupportsMapper),
ListSizeCur(CumulativeListSizes.begin()),
ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
assert(UniqueDecls.size() == DeclsListNum.size() &&
"Inconsistent number of declarations and list sizes!");
if (!DeclsListNum.empty())
RemainingLists = *NumListsCur;
if (SupportsMapper)
MapperCur = Mappers.begin();
}
/// Construct an iterator that scan lists for a given declaration \a
/// Declaration.
///
/// Delegates to the scan-all constructor, then fast-forwards past the lists
/// of all preceding declarations; if \a Declaration is not found, the
/// iterator is positioned at the end of the component storage.
explicit const_component_lists_iterator(
const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components, bool SupportsMapper,
ArrayRef<Expr *> Mappers)
: const_component_lists_iterator(UniqueDecls, DeclsListNum,
CumulativeListSizes, Components,
SupportsMapper, Mappers) {
// Look for the desired declaration. While we are looking for it, we
// update the state so that we know the component where a given list
// starts.
for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
if (*DeclCur == Declaration)
break;
assert(*NumListsCur > 0 && "No lists associated with declaration??");
// Skip the lists associated with the current declaration, but save the
// last list size that was skipped.
std::advance(ListSizeCur, *NumListsCur - 1);
PrevListSize = *ListSizeCur;
++ListSizeCur;
if (SupportsMapper)
++MapperCur;
}
// If we didn't find any declaration, advance the iterator to after the
// last component and set remaining lists to zero.
if (ListSizeCur == CumulativeListSizes.end()) {
this->I = End;
RemainingLists = 0u;
return;
}
// Set the remaining lists with the total number of lists of the current
// declaration.
RemainingLists = *NumListsCur;
// Adjust the list size end iterator to the end of the relevant range.
ListSizeEnd = ListSizeCur;
std::advance(ListSizeEnd, RemainingLists);
// Given that the list sizes are cumulative, the index of the component
// that start the list is the size of the previous list.
std::advance(this->I, PrevListSize);
}
// Return the array with the current list. The sizes are cumulative, so the
// array size is the difference between the current size and previous one.
// The returned mapper is null unless the clause supports mappers and one is
// attached to the current declaration.
std::tuple<const ValueDecl *, MappableExprComponentListRef,
const ValueDecl *>
operator*() const {
assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
const ValueDecl *Mapper = nullptr;
if (SupportsMapper && *MapperCur)
Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
return std::make_tuple(
*DeclCur,
MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
Mapper);
}
std::tuple<const ValueDecl *, MappableExprComponentListRef,
const ValueDecl *>
operator->() const {
return **this;
}
// Skip the components of the current list.
const_component_lists_iterator &operator++() {
assert(ListSizeCur != ListSizeEnd && RemainingLists &&
"Invalid iterator!");
// If we don't have more lists just skip all the components. Otherwise,
// advance the iterator by the number of components in the current list.
if (std::next(ListSizeCur) == ListSizeEnd) {
this->I = End;
RemainingLists = 0;
} else {
std::advance(this->I, *ListSizeCur - PrevListSize);
PrevListSize = *ListSizeCur;
// We are done with a declaration, move to the next one.
if (!(--RemainingLists)) {
++DeclCur;
++NumListsCur;
if (SupportsMapper)
++MapperCur;
RemainingLists = *NumListsCur;
assert(RemainingLists && "No lists in the following declaration??");
}
}
++ListSizeCur;
return *this;
}
};
using const_component_lists_range =
    llvm::iterator_range<const_component_lists_iterator>;

/// Returns an iterator at the first component list of the clause; it scans
/// every declaration and every list.
const_component_lists_iterator component_lists_begin() const {
  ArrayRef<Expr *> Mappers =
      SupportsMapper ? getUDMapperRefs() : ArrayRef<Expr *>(llvm::None);
  return const_component_lists_iterator(
      getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
      getComponentsRef(), SupportsMapper, Mappers);
}
/// Returns the past-the-end iterator for component-list traversal: all input
/// arrays are empty and the component range is collapsed onto its end.
const_component_lists_iterator component_lists_end() const {
  MappableExprComponentListRef Comps = getComponentsRef();
  return const_component_lists_iterator(
      ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
      MappableExprComponentListRef(Comps.end(), Comps.end()), SupportsMapper,
      llvm::None);
}
/// Range over every (declaration, component list, mapper) tuple.
const_component_lists_range component_lists() const {
  return const_component_lists_range(component_lists_begin(),
                                     component_lists_end());
}
/// Returns an iterator over the component lists that belong to the given
/// declaration \p VD only.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
  ArrayRef<Expr *> Mappers =
      SupportsMapper ? getUDMapperRefs() : ArrayRef<Expr *>(llvm::None);
  return const_component_lists_iterator(
      VD, getUniqueDeclsRef(), getDeclNumListsRef(),
      getComponentListSizesRef(), getComponentsRef(), SupportsMapper, Mappers);
}
/// End iterator for per-declaration traversal; identical to the end of the
/// full component-list range.
const_component_lists_iterator decl_component_lists_end() const {
return component_lists_end();
}
/// Range over the component lists of declaration \p VD.
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
  return const_component_lists_range(decl_component_lists_begin(VD),
                                     decl_component_lists_end());
}
/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

/// Range over every unique base declaration stored in the clause.
const_all_decls_range all_decls() const {
  const ArrayRef<ValueDecl *> Decls = getUniqueDeclsRef();
  return llvm::make_range(Decls.begin(), Decls.end());
}
using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
using const_all_num_lists_range =
    llvm::iterator_range<const_all_num_lists_iterator>;

/// Range over the per-declaration list counts.
const_all_num_lists_range all_num_lists() const {
  const ArrayRef<unsigned> Counts = getDeclNumListsRef();
  return llvm::make_range(Counts.begin(), Counts.end());
}
using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
using const_all_lists_sizes_range =
    llvm::iterator_range<const_all_lists_sizes_iterator>;

/// Range over the cumulative component-list sizes.
const_all_lists_sizes_range all_lists_sizes() const {
  const ArrayRef<unsigned> Sizes = getComponentListSizesRef();
  return llvm::make_range(Sizes.begin(), Sizes.end());
}
using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
using const_all_components_range =
    llvm::iterator_range<const_all_components_iterator>;

/// Range over the flattened component storage of all lists.
const_all_components_range all_components() const {
  const ArrayRef<MappableComponent> Comps = getComponentsRef();
  return llvm::make_range(Comps.begin(), Comps.end());
}
// Iterator types over the user-defined mapper references stored in the
// trailing objects.
using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
using mapperlist_const_range =
llvm::iterator_range<mapperlist_const_iterator>;
mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
mapperlist_const_iterator mapperlist_begin() const {
return getUDMapperRefs().begin();
}
mapperlist_const_iterator mapperlist_end() const {
return getUDMapperRefs().end();
}
/// Ranges over the user-defined mapper references (one slot per listed
/// expression; entries may be null).
mapperlist_range mapperlists() {
  return llvm::make_range(mapperlist_begin(), mapperlist_end());
}
mapperlist_const_range mapperlists() const {
  return llvm::make_range(mapperlist_begin(), mapperlist_end());
}
};
/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
private llvm::TrailingObjects<
OMPMapClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
// The unsigned trailing array holds the per-declaration list counts followed
// by the cumulative component-list sizes.
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
private:
/// Map-type-modifiers for the 'map' clause.
OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};
/// Location of map-type-modifiers for the 'map' clause.
SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];
/// Map type for the 'map' clause.
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
/// Is this an implicit map type or not.
bool MapTypeIsImplicit = false;
/// Location of the map type.
SourceLocation MapLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Build a clause for \a NumVars listed expressions, \a
/// NumUniqueDeclarations declarations, \a NumComponentLists total component
/// lists, and \a NumComponents total expression components.
///
/// \param MapModifiers Map-type-modifiers.
/// \param MapModifiersLoc Locations of map-type-modifiers.
/// \param MapperQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
/// \param MapType Map type.
/// \param MapTypeIsImplicit Map type is inferred implicitly.
/// \param MapLoc Location of the map type.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc MapperQualifierLoc,
DeclarationNameInfo MapperIdInfo,
OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
SourceLocation MapLoc, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
/*SupportsMapper=*/true, &MapperQualifierLoc,
&MapperIdInfo),
MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
// Callers must supply exactly NumberOfOMPMapClauseModifiers modifier kinds
// and locations; both arrays are copied wholesale.
assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
"Unexpected number of map type modifiers.");
llvm::copy(MapModifiers, std::begin(MapTypeModifiers));
assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
MapModifiersLoc.size() &&
"Unexpected number of map type modifier locations.");
llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes,
/*SupportsMapper=*/true) {}
/// Set map-type-modifier for the clause.
///
/// \param I index for map-type-modifier.
/// \param T map-type-modifier for the clause.
void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
assert(I < NumberOfOMPMapClauseModifiers &&
"Unexpected index to store map type modifier, exceeds array size.");
MapTypeModifiers[I] = T;
}
/// Set location for the map-type-modifier.
///
/// \param I index for map-type-modifier location.
/// \param TLoc map-type-modifier location.
void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
assert(I < NumberOfOMPMapClauseModifiers &&
"Index to store map type modifier location exceeds array size.");
MapTypeModifiersLoc[I] = TLoc;
}
/// Set type for the clause.
///
/// \param T Type for the clause.
void setMapType(OpenMPMapClauseKind T) { MapType = T; }
/// Set type location.
///
/// \param TLoc Type location.
void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param MapModifiers Map-type-modifiers.
/// \param MapModifiersLoc Location of map-type-modifiers.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
/// \param Type Map type.
/// \param TypeIsImplicit Map type is inferred implicitly.
/// \param TypeLoc Location of the map type.
static OMPMapClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
ArrayRef<Expr *> UDMapperRefs,
ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);
/// Creates an empty clause with the place for \a NumVars original
/// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
/// lists, and \a NumComponents expression components.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPMapClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
/// Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
/// Is this an implicit map type?
/// We have to capture 'IsMapTypeImplicit' from the parser for more
/// informative error messages. It helps distinguish map(r) from
/// map(tofrom: r), which is important to print more helpful error
/// messages for some target directives.
bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
/// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
///
/// \param Cnt index for map-type-modifier.
OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMapClauseModifiers &&
"Requested modifier exceeds the total number of modifiers.");
return MapTypeModifiers[Cnt];
}
/// Fetches the map-type-modifier location at 'Cnt' index of array of
/// modifiers' locations.
///
/// \param Cnt index for map-type-modifier location.
SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMapClauseModifiers &&
"Requested modifier location exceeds total number of modifiers.");
return MapTypeModifiersLoc[Cnt];
}
/// Fetches ArrayRef of map-type-modifiers.
ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
return llvm::makeArrayRef(MapTypeModifiers);
}
/// Fetches ArrayRef of location of map-type-modifiers.
ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
return llvm::makeArrayRef(MapTypeModifiersLoc);
}
/// Fetches location of clause mapping kind.
SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
child_range children() {
return child_range(
reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPMapClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// Only 'to' and 'tofrom' map types expose the listed expressions as "used"
// children; other map kinds yield an empty range.
child_range used_children() {
if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
auto Children = const_cast<OMPMapClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_map;
}
};
/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// NumTeams number.
Stmt *NumTeams = nullptr;
/// Set the NumTeams number.
///
/// \param E NumTeams number.
void setNumTeams(Expr *E) { NumTeams = E; }
public:
/// Build 'num_teams' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPNumTeamsClause()
: OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return NumTeams number.
Expr *getNumTeams() { return cast<Expr>(NumTeams); }
/// Return NumTeams number.
Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
const_child_range children() const {
return const_child_range(&NumTeams, &NumTeams + 1);
}
// The num_teams expression is not treated as a "used" child.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_num_teams;
}
};
/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// ThreadLimit number.
Stmt *ThreadLimit = nullptr;
/// Set the ThreadLimit number.
///
/// \param E ThreadLimit number.
void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
/// Build 'thread_limit' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPThreadLimitClause(Expr *E, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPThreadLimitClause()
: OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return ThreadLimit number.
Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
/// Return ThreadLimit number.
Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
const_child_range children() const {
return const_child_range(&ThreadLimit, &ThreadLimit + 1);
}
// The thread_limit expression is not treated as a "used" child.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_thread_limit;
}
};
/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Priority number.
Stmt *Priority = nullptr;
/// Set the Priority number.
///
/// \param E Priority number.
void setPriority(Expr *E) { Priority = E; }
public:
/// Build 'priority' clause.
///
/// \param Priority Expression associated with this clause.
/// \param HelperPriority Helper priority for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
setPreInitStmt(HelperPriority, CaptureRegion);
}
/// Build an empty clause.
OMPPriorityClause()
: OMPClause(llvm::omp::OMPC_priority, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return Priority number.
Expr *getPriority() { return cast<Expr>(Priority); }
/// Return Priority number.
Expr *getPriority() const { return cast<Expr>(Priority); }
child_range children() { return child_range(&Priority, &Priority + 1); }
const_child_range children() const {
return const_child_range(&Priority, &Priority + 1);
}
// Defined out-of-line (depends on the clause's use context).
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_priority;
}
};
/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Grainsize expression of the 'grainsize' clause.
Stmt *Grainsize = nullptr;
/// Set the grainsize expression.
void setGrainsize(Expr *Size) { Grainsize = Size; }
public:
/// Build 'grainsize' clause.
///
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper grainsize for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPGrainsizeClause()
: OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the grainsize expression (may be null for an empty clause).
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
const_child_range children() const {
return const_child_range(&Grainsize, &Grainsize + 1);
}
// Defined out-of-line (depends on the clause's use context).
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_grainsize;
}
};
/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
///
/// The clause carries no expressions, only source locations.
class OMPNogroupClause : public OMPClause {
public:
/// Build 'nogroup' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPNogroupClause()
: OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_nogroup;
}
};
/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Num_tasks expression of the 'num_tasks' clause.
Stmt *NumTasks = nullptr;
/// Set the num_tasks expression.
void setNumTasks(Expr *Size) { NumTasks = Size; }
public:
/// Build 'num_tasks' clause.
///
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper num_tasks expression for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPNumTasksClause()
: OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the num_tasks expression (may be null for an empty clause).
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
const_child_range children() const {
return const_child_range(&NumTasks, &NumTasks + 1);
}
// Defined out-of-line (depends on the clause's use context).
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
}
};
/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Hint expression of the 'hint' clause.
Stmt *Hint = nullptr;
/// Set hint expression.
void setHint(Expr *H) { Hint = H; }
public:
/// Build 'hint' clause with expression \a Hint.
///
/// \param Hint Hint expression.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
Hint(Hint) {}
/// Build an empty clause.
OMPHintClause()
: OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns the hint expression (may be null for an empty clause).
Expr *getHint() const { return cast_or_null<Expr>(Hint); }
child_range children() { return child_range(&Hint, &Hint + 1); }
const_child_range children() const {
return const_child_range(&Hint, &Hint + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_hint;
}
};
/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives
  /// (stored as this clause's pre-init statement).
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
  }
};
/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifier for the 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) {
    ModifierLoc = Loc;
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param MLoc Location of the modifier.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
        KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
                  SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }

  // This clause holds no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
  }
};
/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'to' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'to' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for the number of variables specified in \a Sizes.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                       ArrayRef<SourceLocation> TheMotionModifiersLoc,
                       NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                             ArrayRef<SourceLocation> MotionModifiersLoc,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_to;
  }
};
/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'from' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'from' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for the number of variables specified in \a Sizes.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                         ArrayRef<SourceLocation> TheMotionModifiersLoc,
                         NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
                                  Sizes, /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
         ArrayRef<SourceLocation> MotionModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_from;
  }
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build a clause for the number of variables specified in \a Sizes.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs, Sizes) {
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Three consecutive sub-lists of varlist_size() expressions each: the
    // original variables, the private copies, and the copy initializers.
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
  }
};
/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
    : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
      private llvm::TrailingObjects<
          OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
                                  const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  // NOTE(review): the 'unsigned' array presumably holds per-declaration list
  // counts followed by per-list component counts (layout defined by
  // OMPMappableExprListClause) — confirm against that base class.
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDeviceAddrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDeviceAddrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  // The variable-list expressions double as the clause's child statements.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    // Reuse the non-const overload; safe because it does not mutate.
    auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is a 'use_device_addr' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
  }
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  // NOTE(review): layout of the 'unsigned' array is defined by
  // OMPMappableExprListClause (declaration list counts + component counts).
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPIsDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  // The variable-list expressions double as the clause's child statements.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    // Reuse the non-const overload; safe because it does not mutate.
    auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is an 'is_device_ptr' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr;
  }
};
/// This represents clause 'nontemporal' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp simd nontemporal(a)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'nontemporal' for
/// the variable 'a'.
class OMPNontemporalClause final
    : public OMPVarListClause<OMPNontemporalClause>,
      private llvm::TrailingObjects<OMPNontemporalClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
                                               StartLoc, LParenLoc, EndLoc, N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured
  /// by one of the privatization clauses.
  // The private copies live in trailing storage immediately after the
  // variable list (starting at varlist_end()), one slot per variable.
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  // The variable-list expressions double as the clause's child statements.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    // Reuse the non-const overload; safe because it does not mutate.
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Iterate over the private-copy expressions as child statements.
  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }
  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is a 'nontemporal' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
  }
};
/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// The kind argument of the 'order' clause (e.g. 'concurrent').
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
                 SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPOrderClause()
      : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPOrderClauseKind getKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getKindKwLoc() const { return KindKwLoc; }

  // This clause has no expression children; all ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is an 'order' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_order;
  }
};
/// This represents 'destroy' clause in the '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has 'destroy' clause.
class OMPDestroyClause final : public OMPClause {
public:
  /// Build 'destroy' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDestroyClause()
      : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
  }

  // 'destroy' carries no arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is a 'destroy' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_destroy;
  }
};
/// This represents 'detach' clause in the '#pragma omp task' directive.
///
/// \code
/// #pragma omp task detach(evt)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'detach' clause
/// with the variable 'evt'.
class OMPDetachClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression of the 'detach' clause (the event handler).
  Stmt *Evt = nullptr;

  /// Set the event-handler expression.
  void setEventHandler(Expr *E) { Evt = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'detach' clause with event-handler \a Evt.
  ///
  /// \param Evt Event handler expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Evt(Evt) {}

  /// Build an empty clause.
  OMPDetachClause()
      : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns event-handler expression (may be null for an empty clause).
  Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }

  // The single event-handler expression is the only child.
  child_range children() { return child_range(&Evt, &Evt + 1); }
  const_child_range children() const {
    return const_child_range(&Evt, &Evt + 1);
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is a 'detach' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_detach;
  }
};
/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive'
/// with the variables 'a' and 'b'.
class OMPInclusiveClause final
    : public OMPVarListClause<OMPInclusiveClause>,
      private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInclusiveClause(unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPInclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  // The variable-list expressions double as the clause's child statements.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    // Reuse the non-const overload; safe because it does not mutate.
    auto Children = const_cast<OMPInclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is an 'inclusive' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_inclusive;
  }
};
/// This represents clause 'exclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan exclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'exclusive'
/// with the variables 'a' and 'b'.
class OMPExclusiveClause final
    : public OMPVarListClause<OMPExclusiveClause>,
      private llvm::TrailingObjects<OMPExclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPExclusiveClause(unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPExclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  // The variable-list expressions double as the clause's child statements.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    // Reuse the non-const overload; safe because it does not mutate.
    auto Children = const_cast<OMPExclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is an 'exclusive' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_exclusive;
  }
};
/// This represents clause 'uses_allocators' in the '#pragma omp target'-based
/// directives.
///
/// \code
/// #pragma omp target uses_allocators(default_allocator, my_allocator(traits))
/// \endcode
/// In this example directive '#pragma omp target' has clause 'uses_allocators'
/// with the allocators 'default_allocator' and user-defined 'my_allocator'.
class OMPUsesAllocatorsClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *,
                                    SourceLocation> {
public:
  /// Data for list of allocators.
  struct Data {
    /// Allocator.
    Expr *Allocator = nullptr;
    /// Allocator traits.
    Expr *AllocatorTraits = nullptr;
    /// Locations of '(' and ')' symbols.
    SourceLocation LParenLoc, RParenLoc;
  };

private:
  friend class OMPClauseReader;
  friend TrailingObjects;

  // Per-allocator layout of the trailing Expr* array: each entry occupies
  // ExprOffsets::Total (= 2) consecutive slots, [Allocator, AllocatorTraits].
  enum class ExprOffsets {
    Allocator,
    AllocatorTraits,
    Total,
  };

  // Per-allocator layout of the trailing SourceLocation array: the paren
  // locations of each allocator's optional traits list.
  enum class ParenLocsOffsets {
    LParen,
    RParen,
    Total,
  };

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Total number of allocators in the clause.
  unsigned NumOfAllocators = 0;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of allocators associated with the clause.
  OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc, unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumOfAllocators(N) {}

  /// Build an empty clause.
  /// \param N Number of allocators associated with the clause.
  ///
  explicit OMPUsesAllocatorsClause(unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
                  SourceLocation()),
        NumOfAllocators(N) {}

  // Two Expr* slots (allocator + traits) per allocator entry.
  unsigned numTrailingObjects(OverloadToken<Expr *>) const {
    return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the allocators data for the clause.
  void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);

public:
  /// Creates clause with a list of allocators \p Data.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Data List of allocators.
  static OMPUsesAllocatorsClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);

  /// Creates an empty clause with the place for \p N allocators.
  ///
  /// \param C AST context.
  /// \param N The number of allocators.
  static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of allocators associated with the clause.
  unsigned getNumberOfAllocators() const { return NumOfAllocators; }

  /// Returns data for the specified allocator.
  OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const;

  // Iterators
  // Children cover both allocator and allocator-traits expressions.
  child_range children() {
    Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
    return child_range(Begin, Begin + NumOfAllocators *
                                          static_cast<int>(ExprOffsets::Total));
  }
  const_child_range children() const {
    Stmt *const *Begin =
        reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
    return const_child_range(
        Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total));
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is a 'uses_allocators' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_uses_allocators;
  }
};
/// This represents clause 'affinity' in the '#pragma omp task'-based
/// directives.
///
/// \code
/// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i])
/// \endcode
/// In this example directive '#pragma omp task' has clause 'affinity' with the
/// affinity modifer 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]'
/// and 'c[i]'.
class OMPAffinityClause final
    : public OMPVarListClause<OMPAffinityClause>,
      private llvm::TrailingObjects<OMPAffinityClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':' symbol.
  SourceLocation ColonLoc;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of locators associated with the clause.
  OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
                                            LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  /// \param N Number of locators associated with the clause.
  ///
  explicit OMPAffinityClause(unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity,
                                            SourceLocation(), SourceLocation(),
                                            SourceLocation(), N) {}

  /// Sets the affinity modifier for the clause, if any.
  // The modifier occupies the single trailing Expr* slot directly after the
  // locator list (index varlist_size()).
  void setModifier(Expr *E) {
    getTrailingObjects<Expr *>()[varlist_size()] = E;
  }

  /// Sets the location of ':' symbol.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a modifier a list of locator items.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param Locators List of locator items.
  static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, Expr *Modifier,
                                   ArrayRef<Expr *> Locators);

  /// Creates an empty clause with the place for \p N locator items.
  ///
  /// \param C AST context.
  /// \param N The number of locator items.
  static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets affinity modifier (stored after the locator list; may be null).
  Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; }
  Expr *getModifier() const {
    return getTrailingObjects<Expr *>()[varlist_size()];
  }

  /// Gets the location of ':' symbol.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // Iterators
  // Children are the locator expressions plus the modifier slot when set.
  child_range children() {
    int Offset = getModifier() ? 1 : 0;
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end() + Offset));
  }
  const_child_range children() const {
    // Reuse the non-const overload; safe because it does not mutate.
    auto Children = const_cast<OMPAffinityClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Empty ranges: no "used" child expressions for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true if \p T is an 'affinity' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_affinity;
  }
};
/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) Ptr<CLASS>
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  /// One Visit##Class method per clause class, generated from the clause
  /// table; each forwards to the derived visitor's handler (CRTP dispatch).
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
  RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  /// Dispatch on the dynamic clause kind of \p S.
  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
  case llvm::omp::Clause::Enum: \
    return Visit##Class(static_cast<PTR(Class)>(S));
#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
  case llvm::omp::Clause::Enum: \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
    default:
      break;
    }
    // Clause kinds without a dedicated class (and unknown kinds) fall through
    // to the base case. Previously control flowed off the end of this
    // non-void function here, which is undefined behavior when RetTy != void.
    return static_cast<ImplClass *>(this)->VisitOMPClause(S);
  }
  // Base case, ignore it. :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};
/// Pointer-to-const alias used by the const visitor below.
template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;

/// Visitor over mutable OMPClause pointers.
template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};

/// Visitor over const OMPClause pointers.
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
    public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};
/// Visitor that prints OpenMP clauses to a raw_ostream using a given
/// printing policy; one Visit##Class overload per clause class.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
  /// Process motion clauses.
  template <typename T> void VisitOMPMotionClause(T *Node);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

  // Declare one printer method per clause class, generated from the table.
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
  void Visit##Class(Class *S);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};
/// A single property of a context-selector trait.
struct OMPTraitProperty {
  llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;

  /// The raw string as we parsed it. This is needed for the `isa` trait set
  /// (which accepts anything) and (later) extensions.
  StringRef RawString;
};
/// A trait selector: a kind, an optional score/condition expression, and an
/// ordered list of properties.
struct OMPTraitSelector {
  Expr *ScoreOrCondition = nullptr;
  llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
  llvm::SmallVector<OMPTraitProperty, 1> Properties;
};
/// A trait selector set: a kind and an ordered list of selectors.
struct OMPTraitSet {
  llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
  llvm::SmallVector<OMPTraitSelector, 2> Selectors;
};
/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional score/condition,
/// and an ordered collection of properties.
class OMPTraitInfo {
  /// Private constructor accessible only by ASTContext.
  OMPTraitInfo() {}
  friend class ASTContext;

public:
  /// Reconstruct a (partial) OMPTraitInfo object from a mangled name.
  OMPTraitInfo(StringRef MangledName);

  /// The outermost level of selector sets.
  llvm::SmallVector<OMPTraitSet, 2> Sets;

  /// Invoke \p Cond on every score/condition expression across all sets and
  /// selectors; returns true as soon as any invocation returns true.
  /// The bool passed to \p Cond is true when the expression is a score
  /// (i.e. the selector is not a user condition).
  bool anyScoreOrCondition(
      llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) {
    return llvm::any_of(Sets, [&](OMPTraitSet &Set) {
      return llvm::any_of(
          Set.Selectors, [&](OMPTraitSelector &Selector) {
            return Cond(Selector.ScoreOrCondition,
                        /* IsScore */ Selector.Kind !=
                            llvm::omp::TraitSelector::user_condition);
          });
    });
  }

  /// Create a variant match info object from this trait info object. While the
  /// former is a flat representation the actual main difference is that the
  /// latter uses clang::Expr to store the score/condition while the former is
  /// independent of clang. Thus, expressions and conditions are evaluated in
  /// this method.
  void getAsVariantMatchInfo(ASTContext &ASTCtx,
                             llvm::omp::VariantMatchInfo &VMI) const;

  /// Return a string representation identifying this context selector.
  std::string getMangledName() const;

  /// Print a human readable representation into \p OS.
  void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
// Stream operators for OMPTraitInfo (by reference and by pointer).
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);
/// Clang specific specialization of the OMPContext to lookup target features.
struct TargetOMPContext final : public llvm::omp::OMPContext {
  /// \p DiagUnknownTrait is invoked for traits that cannot be recognized;
  /// \p CurrentFunctionDecl provides the declaration context used to look up
  /// target features (may influence the feature map — confirm in the .cpp).
  TargetOMPContext(ASTContext &ASTCtx,
                   std::function<void(StringRef)> &&DiagUnknownTrait,
                   const FunctionDecl *CurrentFunctionDecl);
  virtual ~TargetOMPContext() = default;

  /// See llvm::omp::OMPContext::matchesISATrait
  bool matchesISATrait(StringRef RawString) const override;

private:
  // Callback that decides whether a raw ISA trait string names a valid
  // target feature.
  std::function<bool(StringRef)> FeatureValidityCheck;
  // Diagnostic callback for unknown traits.
  std::function<void(StringRef)> DiagUnknownTrait;
  // Cached feature name -> enabled state for the current target/function.
  llvm::StringMap<bool> FeatureMap;
};
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
vision.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS IIIII OOO N N %
% V V I SS I O O NN N %
% V V I SSS I O O N N N %
% V V I SS I O O N NN %
% V IIIII SSSSS IIIII OOO N N %
% %
% %
% MagickCore Computer Vision Methods %
% %
% Software Design %
% Cristy %
% September 2014 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/opencl-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/vision.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n n e c t e d C o m p o n e n t s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConnectedComponentsImage() returns the connected-components of the image
% uniquely labeled. The returned connected components image colors member
% defines the number of unique objects. Choose from 4 or 8-way connectivity.
%
% You are responsible for freeing the connected components objects resources
% with this statement;
%
% objects = (CCObjectInfo *) RelinquishMagickMemory(objects);
%
% The format of the ConnectedComponentsImage method is:
%
% Image *ConnectedComponentsImage(const Image *image,
% const size_t connectivity,CCObjectInfo **objects,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o connectivity: how many neighbors to visit, choose from 4 or 8.
%
% o objects: return the attributes of each unique object.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  qsort() comparator: order CCObjectInfo records by decreasing area
  (largest objects first).
*/
static int CCObjectInfoCompare(const void *x,const void *y)
{
  const CCObjectInfo
    *p,
    *q;

  p=(const CCObjectInfo *) x;
  q=(const CCObjectInfo *) y;
  /*
    Compare explicitly rather than returning (int) (q->area-(ssize_t) p->area):
    the subtraction idiom truncates sub-unit differences to 0 and can overflow
    int for large areas, producing an inconsistent ordering (undefined
    behavior in qsort).
  */
  if (q->area > p->area)
    return(1);
  if (q->area < p->area)
    return(-1);
  return(0);
}
MagickExport Image *ConnectedComponentsImage(const Image *image,
const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception)
{
#define ConnectedComponentsImageTag "ConnectedComponents/Image"
CacheView
*image_view,
*component_view;
CCObjectInfo
*object;
char
*c;
const char
*artifact;
double
area_threshold;
Image
*component_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MatrixInfo
*equivalences;
register ssize_t
i;
size_t
size;
ssize_t
first,
last,
n,
step,
y;
/*
Initialize connected components image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (objects != (CCObjectInfo **) NULL)
*objects=(CCObjectInfo *) NULL;
/* component labels are stored as colormap indexes in a cloned image */
component_image=CloneImage(image,0,0,MagickTrue,
exception);
if (component_image == (Image *) NULL)
return((Image *) NULL);
component_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse)
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Initialize connected components equivalences.
*/
/* guard against columns*rows overflowing size_t */
size=image->columns*image->rows;
if (image->columns != (size/image->rows))
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception);
if (equivalences == (MatrixInfo *) NULL)
{
component_image=DestroyImage(component_image);
return((Image *) NULL);
}
/* union-find forest: initially each pixel is its own equivalence class */
for (n=0; n < (ssize_t) (image->columns*image->rows); n++)
(void) SetMatrixElement(equivalences,n,0,&n);
object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object));
if (object == (CCObjectInfo *) NULL)
{
equivalences=DestroyMatrixInfo(equivalences);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(object,0,MaxColormapSize*sizeof(*object));
/* seed bounding boxes with extreme values so min/max updates work below */
for (i=0; i < (ssize_t) MaxColormapSize; i++)
{
object[i].id=i;
object[i].bounding_box.x=(ssize_t) image->columns;
object[i].bounding_box.y=(ssize_t) image->rows;
GetPixelInfo(image,&object[i].color);
}
/*
Find connected components.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
/* one pass per already-visited neighbor direction: 2 for 4-way, 4 for 8-way */
for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++)
{
ssize_t
connect4[2][2] = { { -1, 0 }, { 0, -1 } },
connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } },
dx,
dy;
if (status == MagickFalse)
continue;
/* (dy,dx) is the offset to the neighbor tested in this pass */
dy=connectivity > 4 ? connect8[n][0] : connect4[n][0];
dx=connectivity > 4 ? connect8[n][1] : connect4[n][1];
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* fetch a 3-row window centered on row y (starts at y-1) */
p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/* advance p from row y-1 to the middle row y of the window */
p+=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel,
target;
ssize_t
neighbor_offset,
obj,
offset,
ox,
oy,
root;
/*
Is neighbor an authentic pixel and a different color than the pixel?
*/
GetPixelInfoPixel(image,p,&pixel);
if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) ||
((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows))
{
p+=GetPixelChannels(image);
continue;
}
neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx*
GetPixelChannels(image);
GetPixelInfoPixel(image,p+neighbor_offset,&target);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
p+=GetPixelChannels(image);
continue;
}
/*
Resolve this equivalence.
*/
/* linearized pixel indexes into the equivalence matrix */
offset=y*image->columns+x;
neighbor_offset=dy*image->columns+dx;
/* find the root of this pixel's class */
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != ox)
{
ox=obj;
status=GetMatrixElement(equivalences,ox,0,&obj);
}
/* find the root of the neighbor's class */
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != oy)
{
oy=obj;
status=GetMatrixElement(equivalences,oy,0,&obj);
}
/* union: the smaller index becomes the common root */
if (ox < oy)
{
status=SetMatrixElement(equivalences,oy,0,&ox);
root=ox;
}
else
{
status=SetMatrixElement(equivalences,ox,0,&oy);
root=oy;
}
/* path compression: repoint both chains directly at the root */
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,ox,0,&obj);
status=SetMatrixElement(equivalences,ox,0,&root);
}
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,oy,0,&obj);
status=SetMatrixElement(equivalences,oy,0,&root);
}
status=SetMatrixElement(equivalences,y*image->columns+x,0,&root);
p+=GetPixelChannels(image);
}
}
}
image_view=DestroyCacheView(image_view);
/*
Label connected components.
*/
n=0;
image_view=AcquireVirtualCacheView(image,exception);
component_view=AcquireAuthenticCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
ssize_t
id,
offset;
offset=y*image->columns+x;
/* roots get a fresh label; members inherit their root's label */
status=GetMatrixElement(equivalences,offset,0,&id);
if (id != offset)
status=GetMatrixElement(equivalences,id,0,&id);
else
{
id=n++;
if (id >= (ssize_t) MaxColormapSize)
break;
}
status=SetMatrixElement(equivalences,offset,0,&id);
/* accumulate per-object statistics (bounding box, mean color, centroid) */
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x >= (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y >= (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].color.red+=QuantumScale*GetPixelRed(image,p);
object[id].color.green+=QuantumScale*GetPixelGreen(image,p);
object[id].color.blue+=QuantumScale*GetPixelBlue(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p);
if (image->colorspace == CMYKColorspace)
object[id].color.black+=QuantumScale*GetPixelBlack(image,p);
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
SetPixelIndex(component_image,(Quantum) id,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(component_image);
}
/* NOTE(review): inner loop breaks when id >= MaxColormapSize but this
outer check uses n > MaxColormapSize — possible off-by-one; confirm
against upstream. */
if (n > (ssize_t) MaxColormapSize)
break;
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
component_view=DestroyCacheView(component_view);
image_view=DestroyCacheView(image_view);
equivalences=DestroyMatrixInfo(equivalences);
if (n > (ssize_t) MaxColormapSize)
{
object=(CCObjectInfo *) RelinquishMagickMemory(object);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"TooManyObjects");
}
component_image->colors=(size_t) n;
/* convert accumulated max coordinates/sums to extents, means & centroids */
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].color.red=QuantumRange*(object[i].color.red/object[i].area);
object[i].color.green=QuantumRange*(object[i].color.green/object[i].area);
object[i].color.blue=QuantumRange*(object[i].color.blue/object[i].area);
if (image->alpha_trait != UndefinedPixelTrait)
object[i].color.alpha=QuantumRange*(object[i].color.alpha/object[i].area);
if (image->colorspace == CMYKColorspace)
object[i].color.black=QuantumRange*(object[i].color.black/object[i].area);
object[i].centroid.x=object[i].centroid.x/object[i].area;
object[i].centroid.y=object[i].centroid.y/object[i].area;
}
artifact=GetImageArtifact(image,"connected-components:area-threshold");
area_threshold=0.0;
if (artifact != (const char *) NULL)
area_threshold=StringToDouble(artifact,(char **) NULL);
if (area_threshold > 0.0)
{
/*
Merge object below area threshold.
*/
component_view=AcquireAuthenticCacheView(component_image,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
{
double
census;
RectangleInfo
bounding_box;
register ssize_t
j;
size_t
id;
if (status == MagickFalse)
continue;
if ((double) object[i].area >= area_threshold)
continue;
/* census counts neighboring labels in a 1-pixel-expanded bounding box;
the small object is merged into the most frequent neighbor */
for (j=0; j < (ssize_t) component_image->colors; j++)
object[j].census=0;
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height+2; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
bounding_box.y+y-1,bounding_box.width+2,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width+2; x++)
{
j=(ssize_t) GetPixelIndex(component_image,p);
if (j != i)
object[j].census++;
p+=GetPixelChannels(component_image);
}
}
census=0;
id=0;
for (j=0; j < (ssize_t) component_image->colors; j++)
if (census < object[j].census)
{
census=object[j].census;
id=(size_t) j;
}
object[id].area+=object[i].area;
/* relabel the object's pixels to the winning neighbor id */
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,q) == i)
SetPixelIndex(component_image,(Quantum) id,q);
q+=GetPixelChannels(component_image);
}
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
}
}
component_view=DestroyCacheView(component_view);
(void) SyncImage(component_image,exception);
}
artifact=GetImageArtifact(image,"connected-components:mean-color");
if (IsStringTrue(artifact) != MagickFalse)
{
/*
Replace object with mean color.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
component_image->colormap[i]=object[i].color;
}
artifact=GetImageArtifact(image,"connected-components:keep");
if (artifact != (const char *) NULL)
{
/*
Keep these object (make others transparent).
*/
/* parse a comma/space separated list of ids or id ranges ("first-last");
negative values are relative to the number of objects */
for (i=0; i < (ssize_t) component_image->colors; i++)
object[i].census=0;
for (c=(char *) artifact; *c != '\0';)
{
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
object[first].census++;
}
/* any object not mentioned in the list becomes transparent */
for (i=0; i < (ssize_t) component_image->colors; i++)
{
if (object[i].census != 0)
continue;
component_image->alpha_trait=BlendPixelTrait;
component_image->colormap[i].alpha_trait=BlendPixelTrait;
component_image->colormap[i].alpha=(MagickRealType) TransparentAlpha;
}
}
artifact=GetImageArtifact(image,"connected-components:remove");
if (artifact != (const char *) NULL)
{
/*
Remove these object (make them transparent).
*/
/* same list/range syntax as "keep" above, but listed ids are removed */
for (c=(char *) artifact; *c != '\0';)
{
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
{
component_image->alpha_trait=BlendPixelTrait;
component_image->colormap[first].alpha_trait=BlendPixelTrait;
component_image->colormap[first].alpha=(MagickRealType)
TransparentAlpha;
}
}
}
(void) SyncImage(component_image,exception);
artifact=GetImageArtifact(image,"connected-components:verbose");
if ((IsStringTrue(artifact) != MagickFalse) ||
(objects != (CCObjectInfo **) NULL))
{
/*
Report statistics on unique object.
*/
/* recompute statistics from the (possibly merged/relabeled) image */
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width=0;
object[i].bounding_box.height=0;
object[i].bounding_box.x=(ssize_t) component_image->columns;
object[i].bounding_box.y=(ssize_t) component_image->rows;
object[i].centroid.x=0;
object[i].centroid.y=0;
object[i].area=0;
}
component_view=AcquireVirtualCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns,
1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
size_t
id;
id=GetPixelIndex(component_image,p);
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x > (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y > (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
p+=GetPixelChannels(component_image);
}
}
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].centroid.x=object[i].centroid.x/object[i].area;
object[i].centroid.y=object[i].centroid.y/object[i].area;
}
component_view=DestroyCacheView(component_view);
/* report largest objects first */
qsort((void *) object,component_image->colors,sizeof(*object),
CCObjectInfoCompare);
if (objects == (CCObjectInfo **) NULL)
{
(void) fprintf(stdout,
"Objects (id: bounding-box centroid area mean-color):\n");
for (i=0; i < (ssize_t) component_image->colors; i++)
{
char
mean_color[MagickPathExtent];
if (status == MagickFalse)
break;
if (object[i].area <= area_threshold)
continue;
GetColorTuple(&object[i].color,MagickFalse,mean_color);
(void) fprintf(stdout,
" %.20g: %.20gx%.20g%+.20g%+.20g %.1f,%.1f %.20g %s\n",(double)
object[i].id,(double) object[i].bounding_box.width,(double)
object[i].bounding_box.height,(double) object[i].bounding_box.x,
(double) object[i].bounding_box.y,object[i].centroid.x,
object[i].centroid.y,(double) object[i].area,mean_color);
}
}
}
/* ownership of 'object' transfers to the caller only when requested */
if (objects == (CCObjectInfo **) NULL)
object=(CCObjectInfo *) RelinquishMagickMemory(object);
else
*objects=object;
return(component_image);
}
|
SE_fg_int_mex.c | #include "mex.h"
#include "../SE_fgg.h"
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);
#define X prhs[0]
#define HH prhs[1]
#define OPT prhs[2]
#define PHI_OUT plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
/*
  MATLAB MEX entry point: evaluate the fast Gaussian gridding (FGG)
  integration, phi = int(x, H), for N points.  Inputs: X (N-by-d point
  coordinates), HH (periodic grid data), OPT (parameter struct).  Output:
  PHI_OUT, an N-by-1 real vector.
  NOTE: '&params' spellings restored; the archived copy had them mangled
  into HTML entities.
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] )
{
const int N = mxGetM(X);
double* restrict x = mxGetPr(X);
const double* H_per = mxGetPr(HH);
SE_FGG_params params;
SE_FGG_MEX_params(&params, OPT, N);
// scratch arrays
SE_FGG_work work;
SE_FGG_allocate_workspace(&work, &params,true,false);
// output vector
PHI_OUT = mxCreateDoubleMatrix(N,1,mxREAL);
double* phi = mxGetPr(PHI_OUT);
// coordinates and charges
const SE_state st = {.x = x, .q = NULL};
if(VERBOSE)
mexPrintf("[SE%s FG(I)] N=%d, P=%d\n",PER_STR,N,params.P);
#ifdef _OPENMP
// All threads enter the region; the SE_FGG_* routines presumably split the
// work internally (worksharing inside the callees) -- confirm in SE_fgg.c.
#pragma omp parallel default(shared)
#endif
{
// now do the work
SE_FGG_base_gaussian(&work, &params);
// Exactly one of the *_PERIODIC macros selects the periodic extension.
#ifdef THREE_PERIODIC
SE_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef TWO_PERIODIC
SE2P_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef ONE_PERIODIC
SE1P_FGG_extend_fcn(&work, H_per, &params);
#endif
SE_FGG_int(phi, &work, &st, &params);
}
// done
SE_FGG_free_workspace(&work);
}
|
GB_unaryop__identity_uint16_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_int32
// op(A') function: GB_tran__identity_uint16_int32
// C type: uint16_t
// A type: int32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
  Cx = op (cast (Ax)): apply the identity operator element-wise, casting
  each int32_t entry of Ax to uint16_t in Cx.  anz is the number of entries;
  work is split statically across nthreads OpenMP threads.
  (Auto-generated file: code must stay in sync with the generator macros.)
*/
GrB_Info GB_unop__identity_uint16_int32
(
uint16_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
/* Cx [p] = (uint16_t) Ax [p] */
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
  C = op (cast (A')): transpose A, casting int32_t entries to uint16_t.
  The actual loop body lives in the shared template GB_unaryop_transpose.c,
  driven by the GB_* macros defined above (phase 2 of the two-phase
  transpose).  (Auto-generated file: do not hand-modify the logic.)
*/
GrB_Info GB_tran__identity_uint16_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB071-targetparallelfor-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
use of omp target: len is not mapped. It should be firstprivate within target.
*/
#include "omprace.h"
#include <omp.h>
/*
  DataRaceBench DRB071 (race-free): the array is explicitly mapped to the
  target device; 'len' is not mapped and is treated as firstprivate inside
  the target region, so there is no data race.  The pragma/loop shape is the
  benchmark itself and must not be restructured.
*/
int main(int argc, char* argv[])
{
omprace_init();
int i;
int len = 1000;
int a[len];
/* serial initialization on the host */
for (i=0; i<len; i++)
a[i]= i;
/* a[] is mapped; each parallel iteration touches a distinct element */
#pragma omp target map(a[0:len])
#pragma omp parallel for
for (i=0;i< len;i++)
a[i]=a[i]+1;
omprace_fini();
return 0;
}
|
ast-dump-openmp-target.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(void) { // trailing comments only: new lines would shift the source
#pragma omp target // locations that the CHECK lines below assert
; // empty structured block captured by the target region
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target.c:3:1, line:6:1> line:3:6 test 'void (void)'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
|
bml_import_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_allocate_ellpack.h"
#include "bml_import_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Convert a dense matrix into a bml matrix.
*
* \ingroup convert_group
*
* \param N The number of rows/columns
* \param matrix_precision The real precision
* \param A The dense matrix
* \return The bml matrix
*/
bml_matrix_ellpack_t
    * TYPED_FUNC(bml_import_from_dense_ellpack) (bml_dense_order_t order,
                                                 int N, void *A,
                                                 double threshold, int M,
                                                 bml_distribution_mode_t
                                                 distrib_mode)
{
    bml_matrix_ellpack_t *A_bml =
        TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);

    int *A_index = A_bml->index;
    int *A_nnz = A_bml->nnz;

    REAL_T *dense_A = (REAL_T *) A;
    REAL_T *A_value = A_bml->value;

    /* Rows are independent, so each thread fills its own rows of the
     * ellpack value/index arrays. */
#pragma omp parallel for shared(A_value, A_index, A_nnz, dense_A)
    for (int i = 0; i < N; i++)
    {
        A_nnz[i] = 0;
        for (int j = 0; j < N; j++)
        {
            /* Zero-initialize: if 'order' matches neither case below, the
             * old code read an indeterminate A_ij (undefined behavior when
             * LOG_ERROR does not abort). */
            REAL_T A_ij = 0;
            switch (order)
            {
                case dense_row_major:
                    A_ij = dense_A[ROWMAJOR(i, j, N, N)];
                    break;
                case dense_column_major:
                    A_ij = dense_A[COLMAJOR(i, j, N, N)];
                    break;
                default:
                    LOG_ERROR("unknown order\n");
                    break;
            }
            /* Keep only entries above the magnitude threshold; at most M
             * entries are stored per row. */
            if (is_above_threshold(A_ij, threshold))
            {
                A_value[ROWMAJOR(i, A_nnz[i], N, M)] = A_ij;
                A_index[ROWMAJOR(i, A_nnz[i], N, M)] = j;
                A_nnz[i]++;
            }
        }
    }
#ifdef USE_OMP_OFFLOAD
    /* Mirror the freshly filled arrays to the device copy. */
#pragma omp target update to(A_value[:N*M], A_index[:N*M], A_nnz[:N])
#endif
    return A_bml;
}
|
DRB062-matrixvector2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: inner level parallelization.
*/
#define N 1000
double a[N][N],v[N],v_out[N];
/*
  Initialize the matrix a[][] and the vectors v[] and v_out[].
  Returns 0.
*/
int init()
{
  int i,j,k;

  #pragma omp parallel for private(i ,j )
  for (i = 0; i < N; i++) {
    #pragma omp parallel for private(j )
    for (j = 0; j < N; j++) {
      a[i][j] = i * j + 0.01;
    }
    /* Fix: 'j' is private to the parallel loops, so reading it after the
       inner region yields an indeterminate value (undefined behavior).
       Use N, the value j holds after the equivalent serial loop. */
    v_out[i] = i * N + 0.01;
    v[i] = i * N + 0.01;
  }
  return 0;
}
/*
  Matrix-vector product v_out = a * v with nested (two-level) parallelism:
  rows are distributed by the outer loop; each row's dot product uses an
  inner parallel reduction.  This loop structure is the DataRaceBench
  benchmark itself (race-free) and must not be restructured.
*/
void mv()
{
  int i,j;
  #pragma omp parallel for private(i ,j )
  for (i = 0; i < N; i++)
  {
    /* sum is declared inside the outer iteration, so it is per-row */
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for (j = 0; j < N; j++)
    {
      sum += a[i][j]*v[j];
    }
    v_out[i] = sum;
  }
}
/*
  Dump the matrix and both vectors to stdout, one value per line: for each
  row, all N matrix entries, then v_out[row], then v[row].  Returns 0.
  NOTE(review): this file never includes <stdio.h>; printf is implicitly
  declared -- confirm the build adds it.
*/
int print()
{
  int row, col;

  for (row = 0; row < N; row++) {
    for (col = 0; col < N; col++) {
      printf("%lf\n", a[row][col]);
    }
    printf("%lf\n", v_out[row]);
    printf("%lf\n", v[row]);
  }
  return 0;
}
/*
  Driver: initialize inputs, run the parallel matrix-vector product, and
  print the results.
*/
int main()
{
  init();
  mv();
  print();
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.