source
stringlengths
3
92
c
stringlengths
26
2.25M
/* geopm_sched.c */
/* * Copyright (c) 2015 - 2022, Intel Corporation * SPDX-License-Identifier: BSD-3-Clause */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include <stdint.h> #include <unistd.h> #include <pthread.h> #include <errno.h> #include <string.h> #include "geopm_sched.h" #include "geopm_error.h" #include "config.h" #ifdef _OPENMP #include <omp.h> #endif int geopm_sched_num_cpu(void) { return sysconf(_SC_NPROCESSORS_CONF); } int geopm_sched_get_cpu(void) { return sched_getcpu(); } static pthread_once_t g_proc_cpuset_once = PTHREAD_ONCE_INIT; static cpu_set_t *g_proc_cpuset = NULL; static size_t g_proc_cpuset_size = 0; /* If /proc/self/status is usable and correct then parse this file to determine the process affinity. */ int geopm_sched_proc_cpuset_helper(int num_cpu, uint32_t *proc_cpuset, FILE *fid) { const char *key = "Cpus_allowed:"; const size_t key_len = strlen(key); const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0); int err = 0; char *line = NULL; size_t line_len = 0; int read_idx = 0; while ((getline(&line, &line_len, fid)) != -1) { if (strncmp(line, key, key_len) == 0) { char *line_ptr = line + key_len; /* On some systems we have seen the mask padded with zeros beyond the number of online CPUs. 
Deal with this by skipping extra leading 32 bit masks */ int num_comma = 0; char *comma_ptr = line_ptr; while ((comma_ptr = strchr(comma_ptr, ','))) { ++comma_ptr; ++num_comma; } if (num_comma > num_read - 1) { num_comma -= num_read - 1; for (int i = 0; !err && i < num_comma; ++i) { line_ptr = strchr(line_ptr, ','); if (!line_ptr) { err = GEOPM_ERROR_LOGIC; } else { ++line_ptr; } } } for (read_idx = num_read - 1; !err && read_idx >= 0; --read_idx) { int num_match = sscanf(line_ptr, "%x", proc_cpuset + read_idx); if (num_match != 1) { err = GEOPM_ERROR_RUNTIME; } else { line_ptr = strchr(line_ptr, ','); if (read_idx != 0 && line_ptr == NULL) { err = GEOPM_ERROR_RUNTIME; } else { ++line_ptr; } } } } } if (line) { free(line); } if (read_idx != -1) { err = GEOPM_ERROR_RUNTIME; } return err; } static void geopm_proc_cpuset_once(void) { const char *status_path = "/proc/self/status"; const int num_cpu = geopm_sched_num_cpu(); const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0); int err = 0; uint32_t *proc_cpuset = NULL; FILE *fid = NULL; g_proc_cpuset = CPU_ALLOC(num_cpu); if (g_proc_cpuset == NULL) { err = ENOMEM; } if (!err) { g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu); proc_cpuset = calloc(num_read, sizeof(*proc_cpuset)); if (proc_cpuset == NULL) { err = ENOMEM; } } if (!err) { fid = fopen(status_path, "r"); if (!fid) { err = errno ? errno : GEOPM_ERROR_RUNTIME; } } if (!err) { err = geopm_sched_proc_cpuset_helper(num_cpu, proc_cpuset, fid); } if (fid) { fclose(fid); } if (!err) { /* cpu_set_t is managed in units of unsigned long, and may have extra * bits at the end with undefined values. If that happens, * g_proc_cpuset_size may be greater than the size of proc_cpuset, * resulting in reading past the end of proc_cpuset. Avoid this by * only copying the number of bytes needed to contain the mask. Zero * the destination first, since it may not be fully overwritten. * * See the CPU_SET(3) man page for more details about cpu_set_t. 
*/ CPU_ZERO_S(g_proc_cpuset_size, g_proc_cpuset); memcpy(g_proc_cpuset, proc_cpuset, num_read * sizeof(*proc_cpuset)); } else if (g_proc_cpuset) { for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset); } } if (proc_cpuset) { free(proc_cpuset); } } int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset) { int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once); int sched_num_cpu = geopm_sched_num_cpu(); size_t cpuset_size = CPU_ALLOC_SIZE(num_cpu); if (!err && cpuset_size < g_proc_cpuset_size) { err = GEOPM_ERROR_INVALID; } if (!err) { /* Copy up to the smaller of the sizes to avoid buffer overruns. Zero * the destination set first, since it may not be fully overwritten */ CPU_ZERO_S(cpuset_size, proc_cpuset); memcpy(proc_cpuset, g_proc_cpuset, g_proc_cpuset_size); for (int i = sched_num_cpu; i < num_cpu; ++i) { CPU_CLR_S(i, cpuset_size, proc_cpuset); } } return err; } int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp) { /*! @brief Function that returns a cpuset that has bits set for all CPUs enabled for the process which are not used by OpenMP. Rather than returning an empty mask, if all CPUs allocated for the process are used by OpenMP, then the woomp mask will have all bits set. */ int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once); int sched_num_cpu = geopm_sched_num_cpu(); size_t req_alloc_size = CPU_ALLOC_SIZE(num_cpu); if (!err && !g_proc_cpuset) { err = ENOMEM; } if (!err && req_alloc_size < g_proc_cpuset_size) { err = EINVAL; } if (!err) { /* Copy the process CPU mask into the output. */ CPU_ZERO_S(req_alloc_size, woomp); memcpy(woomp, g_proc_cpuset, g_proc_cpuset_size); /* Start an OpenMP parallel region and have each thread clear its bit from the mask. */ #ifdef _OPENMP #pragma omp parallel default(shared) { #pragma omp critical { int cpu_index = sched_getcpu(); if (cpu_index != -1 && cpu_index < num_cpu) { /* Clear the bit for this OpenMP thread's CPU. 
*/ CPU_CLR_S(cpu_index, g_proc_cpuset_size, woomp); } else { err = errno ? errno : GEOPM_ERROR_LOGIC; } } /* end pragma omp critical */ } /* end pragma omp parallel */ #endif /* _OPENMP */ } if (!err) { for (int i = sched_num_cpu; i < num_cpu; ++i) { CPU_CLR_S(i, req_alloc_size, woomp); } } if (err || CPU_COUNT_S(g_proc_cpuset_size, woomp) == 0) { /* If all CPUs are used by the OpenMP gang, then leave the mask open and allow the Linux scheduler to choose. */ for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, woomp); } } return err; }
/* main.c */
#include "XSbench_header.h"

#ifdef MPI
#include<mpi.h>
#endif

int main( int argc, char* argv[] )
{
	// =====================================================================
	// Initialization & Command Line Read-In
	// =====================================================================
	int version = 19;
	int mype = 0;
	double omp_start, omp_end;
	int nprocs = 1;
	unsigned long long verification;

	#ifdef MPI
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif

	// Process CLI Fields -- store in "Inputs" structure
	Inputs in = read_CLI( argc, argv );

	// Set number of OpenMP Threads
	#ifdef OPENMP
	omp_set_num_threads(in.nthreads);
	#endif

	// Print-out of Input Summary
	if( mype == 0 )
		print_inputs( in, nprocs, version );

	// =====================================================================
	// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
	// This is not reflective of a real Monte Carlo simulation workload,
	// therefore, do not profile this region!
	// =====================================================================
	SimulationData SD;

	// If read from file mode is selected, skip initialization and load
	// all simulation data structures from file instead
	if( in.binary_mode == READ )
		SD = binary_read(in);
	else
		SD = grid_init_do_not_profile( in, mype );

	// If writing from file mode is selected, write all simulation data
	// structures to file
	if( in.binary_mode == WRITE && mype == 0 )
		binary_write(in, SD);

	// =====================================================================
	// Cross Section (XS) Parallel Lookup Simulation
	// This is the section that should be profiled, as it reflects a
	// realistic continuous energy Monte Carlo macroscopic cross section
	// lookup kernel.
	// =====================================================================
	if( mype == 0 )
	{
		printf("\n");
		border_print();
		center_print("SIMULATION", 79);
		border_print();
	}

	// Start Simulation Timer
	omp_start = get_time();

	// Run simulation
	if( in.simulation_method == EVENT_BASED )
	{
		if( in.kernel_id == 0 )
			verification = run_event_based_simulation(in, SD, mype);
		else if( in.kernel_id == 1 )
			verification = run_event_based_simulation_optimization_1(in, SD, mype);
		else
		{
			printf("Error: No kernel ID %d found!\n", in.kernel_id);
			exit(1);
		}
	}
	else
		verification = run_history_based_simulation(in, SD, mype);

	if( mype == 0)
	{
		printf("\n" );
		printf("Simulation complete.\n" );
	}

	// End Simulation Timer
	omp_end = get_time();

	// =====================================================================
	// Output Results & Finalize
	// =====================================================================

	// Final Hash Step
	verification = verification % 999983;

	// Print / Save Results and Exit
	int is_invalid_result = print_results( in, mype, omp_end-omp_start,
	                                       nprocs, verification );

	#ifdef MPI
	MPI_Finalize();
	#endif

	return is_invalid_result;
}

//io.c

// Prints program logo
void logo(int version)
{
	border_print();
	printf(
	" __ __ ___________ _ \n"
	" \\ \\ / // ___| ___ \\ | | \n"
	" \\ V / \\ `--.| |_/ / ___ _ __ ___| |__ \n"
	" / \\ `--. \\ ___ \\/ _ \\ '_ \\ / __| '_ \\ \n"
	" / /^\\ \\/\\__/ / |_/ / __/ | | | (__| | | | \n"
	" \\/ \\/\\____/\\____/ \\___|_| |_|\\___|_| |_| \n\n"
	);
	border_print();
	center_print("Developed at Argonne National Laboratory", 79);
	char v[100];
	sprintf(v, "Version: %d", version);
	center_print(v, 79);
	border_print();
}

// Prints Section titles in center of 80 char terminal
void center_print(const char *s, int width)
{
	int length = strlen(s);
	int i;
	for (i=0; i<=(width-length)/2; i++)
	{
		fputs(" ", stdout);
	}
	fputs(s, stdout);
	fputs("\n", stdout);
}

// Prints performance summary and verifies the checksum against the known
// reference values.  Returns 0 when the checksum matches (valid result),
// 1 otherwise.
int print_results( Inputs in, int mype, double runtime, int nprocs,
	unsigned long long vhash )
{
	// Calculate Lookups per sec.  Use a long accumulator: lookups *
	// particles can overflow a 32-bit int for large history-based runs.
	long lookups = 0;
	if( in.simulation_method == HISTORY_BASED )
		lookups = (long) in.lookups * in.particles;
	else if( in.simulation_method == EVENT_BASED )
		lookups = in.lookups;
	int lookups_per_sec = (int) ((double) lookups / runtime);

	// If running in MPI, reduce timing statistics and calculate average
	#ifdef MPI
	int total_lookups = 0;
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Reduce(&lookups_per_sec, &total_lookups, 1, MPI_INT, MPI_SUM,
	           0, MPI_COMM_WORLD);
	#endif

	int is_invalid_result = 1;

	// Print output
	if( mype == 0 )
	{
		border_print();
		center_print("RESULTS", 79);
		border_print();

		// Print the results
		printf("Threads:     %d\n", in.nthreads);
		#ifdef MPI
		printf("MPI ranks:   %d\n", nprocs);
		#endif
		#ifdef MPI
		printf("Total Lookups/s:            ");
		fancy_int(total_lookups);
		printf("Avg Lookups/s per MPI rank: ");
		fancy_int(total_lookups / nprocs);
		#else
		printf("Runtime:     %.3lf seconds\n", runtime);
		printf("Lookups:     ");
		fancy_int(lookups);
		printf("Lookups/s:   ");
		fancy_int(lookups_per_sec);
		#endif
	}

	// Reference checksums for the two supported problem sizes.
	unsigned long long large = 0;
	unsigned long long small = 0;
	if( in.simulation_method == EVENT_BASED )
	{
		small = 945990;
		large = 952131;
	}
	else if( in.simulation_method == HISTORY_BASED )
	{
		small = 941535;
		large = 954318;
	}
	if( strcmp(in.HM, "large") == 0 )
	{
		if( vhash == large )
			is_invalid_result = 0;
	}
	else if( strcmp(in.HM, "small") == 0 )
	{
		if( vhash == small )
			is_invalid_result = 0;
	}

	if(mype == 0 )
	{
		if( is_invalid_result )
			// Bug fix: "INAVALID" typo corrected in the warning message.
			printf("Verification checksum: %llu (WARNING - INVALID CHECKSUM!)\n", vhash);
		else
			printf("Verification checksum: %llu (Valid)\n", vhash);
		border_print();
	}

	return is_invalid_result;
}

// Prints the input configuration summary banner before the simulation runs.
void print_inputs(Inputs in, int nprocs, int version )
{
	// Calculate Estimate of Memory Usage
	int mem_tot = estimate_mem_usage( in );
	logo(version);
	center_print("INPUT SUMMARY", 79);
	border_print();
	if( in.simulation_method == EVENT_BASED )
		printf("Simulation Method:            Event Based\n");
	else
		printf("Simulation Method:            History Based\n");
	if( in.grid_type == NUCLIDE )
		printf("Grid Type:                    Nuclide Grid\n");
	else if( in.grid_type == UNIONIZED )
		printf("Grid Type:                    Unionized Grid\n");
	else
		printf("Grid Type:                    Hash\n");
	printf("Materials:                    %d\n", 12);
	printf("H-M Benchmark Size:           %s\n", in.HM);
	printf("Total Nuclides:               %ld\n", in.n_isotopes);
	printf("Gridpoints (per Nuclide):     ");
	fancy_int(in.n_gridpoints);
	if( in.grid_type == HASH )
	{
		printf("Hash Bins:                    ");
		fancy_int(in.hash_bins);
	}
	if( in.grid_type == UNIONIZED )
	{
		printf("Unionized Energy Gridpoints:  ");
		fancy_int(in.n_isotopes*in.n_gridpoints);
	}
	if( in.simulation_method == HISTORY_BASED )
	{
		printf("Particle Histories:           ");
		fancy_int(in.particles);
		printf("XS Lookups per Particle:      ");
		fancy_int(in.lookups);
	}
	printf("Total XS Lookups:             ");
	fancy_int(in.lookups);
	#ifdef MPI
	printf("MPI Ranks:                    %d\n", nprocs);
	printf("OMP Threads per MPI Rank:     %d\n", in.nthreads);
	printf("Mem Usage per MPI Rank (MB):  ");
	fancy_int(mem_tot);
	#else
	printf("Threads:                      %d\n", in.nthreads);
	printf("Est. Memory Usage (MB):       ");
	fancy_int(mem_tot);
	#endif
	printf("Binary File Mode:             ");
	if( in.binary_mode == NONE )
		printf("Off\n");
	else if( in.binary_mode == READ)
		printf("Read\n");
	else
		printf("Write\n");
	border_print();
	center_print("INITIALIZATION - DO NOT PROFILE", 79);
	border_print();
}

void border_print(void)
{
	printf(
	"==================================================================="
	"=============\n");
}

// Prints comma separated integers - for ease of reading
void fancy_int( long a )
{
	if( a < 1000 )
		printf("%ld\n",a);
	else if( a >= 1000 && a < 1000000 )
		printf("%ld,%03ld\n", a / 1000, a % 1000);
	else if( a >= 1000000 && a < 1000000000 )
		printf("%ld,%03ld,%03ld\n",a / 1000000,(a % 1000000) / 1000,a % 1000 );
	else if( a >= 1000000000 )
		printf("%ld,%03ld,%03ld,%03ld\n",
		       a / 1000000000,
		       (a % 1000000000) / 1000000,
		       (a % 1000000) / 1000,
		       a % 1000 );
	else
		printf("%ld\n",a);
}

// Prints usage text and terminates with a non-zero exit code.
void print_CLI_error(void)
{
	printf("Usage: ./XSBench <options>\n");
	printf("Options include:\n");
	printf("  -m <simulation method>   Simulation method (history, event)\n");
	printf("  -t <threads>             Number of OpenMP threads to run\n");
	printf("  -s <size>                Size of H-M Benchmark to run (small, large, XL, XXL)\n");
	printf("  -g <gridpoints>          Number of gridpoints per nuclide (overrides -s defaults)\n");
	printf("  -G <grid type>           Grid search type (unionized, nuclide, hash). Defaults to unionized.\n");
	printf("  -p <particles>           Number of particle histories\n");
	printf("  -l <lookups>             History Based: Number of Cross-section (XS) lookups per particle. Event Based: Total number of XS lookups.\n");
	printf("  -h <hash bins>           Number of hash bins (only relevant when used with \"-G hash\")\n");
	printf("  -b <binary mode>         Read or write all data structures to file. If reading, this will skip initialization phase. (read, write)\n");
	printf("  -k <kernel ID>           Specifies which kernel to run. 0 is baseline, 1, 2, etc are optimized variants. (0 is default.)\n");
	printf("Default is equivalent to: -m history -s large -l 34 -p 500000 -G unionized\n");
	printf("See readme for full description of default run values\n");
	exit(4);
}

// Parses the command line into an Inputs structure, applying H-M
// benchmark defaults and validating all user-supplied values.
Inputs read_CLI( int argc, char * argv[] )
{
	Inputs input;

	// defaults to the history based simulation method
	input.simulation_method = HISTORY_BASED;
	// defaults to max threads on the system
	#ifdef OPENMP
	//input.nthreads = omp_get_num_procs();
	input.nthreads = #P0;   // NOTE(review): template placeholder, kept verbatim
	#else
	input.nthreads = 1;
	#endif
	// defaults to 355 (corresponding to H-M Large benchmark)
	input.n_isotopes = 355;
	// defaults to 11303 (corresponding to H-M Large benchmark)
	input.n_gridpoints = 11303;
	// defaults to 500,000
	input.particles = 500000;
	// defaults to 34
	input.lookups = 34;
	// default to unionized grid
	input.grid_type = UNIONIZED;
	// default to unionized grid
	input.hash_bins = 10000;
	// default to no binary read/write
	input.binary_mode = NONE;
	// defaults to baseline kernel
	input.kernel_id = 0;

	// defaults to H-M Large benchmark
	input.HM = (char *) malloc( 6 * sizeof(char) );
	input.HM[0] = 'l' ;
	input.HM[1] = 'a' ;
	input.HM[2] = 'r' ;
	input.HM[3] = 'g' ;
	input.HM[4] = 'e' ;
	input.HM[5] = '\0';
	// Tracks whether input.HM still owns the heap buffer above, so the
	// default can be freed exactly once if -s replaces it (leak fix).
	int hm_is_heap = 1;

	// Check if user sets these
	int user_g = 0;
	int default_lookups = 1;
	int default_particles = 1;

	// Collect Raw Input
	for( int i = 1; i < argc; i++ )
	{
		char * arg = argv[i];

		// nthreads (-t)
		if( strcmp(arg, "-t") == 0 )
		{
			if( ++i < argc )
				input.nthreads = atoi(argv[i]);
			else
				print_CLI_error();
		}
		// n_gridpoints (-g)
		else if( strcmp(arg, "-g") == 0 )
		{
			if( ++i < argc )
			{
				user_g = 1;
				input.n_gridpoints = atol(argv[i]);
			}
			else
				print_CLI_error();
		}
		// Simulation Method (-m)
		else if( strcmp(arg, "-m") == 0 )
		{
			char * sim_type;
			if( ++i < argc )
				sim_type = argv[i];
			else
				print_CLI_error();

			if( strcmp(sim_type, "history") == 0 )
				input.simulation_method = HISTORY_BASED;
			else if( strcmp(sim_type, "event") == 0 )
			{
				input.simulation_method = EVENT_BASED;
				// Also resets default # of lookups
				if( default_lookups && default_particles )
				{
					input.lookups = input.lookups * input.particles;
					input.particles = 0;
				}
			}
			else
				print_CLI_error();
		}
		// lookups (-l)
		else if( strcmp(arg, "-l") == 0 )
		{
			if( ++i < argc )
			{
				input.lookups = atoi(argv[i]);
				default_lookups = 0;
			}
			else
				print_CLI_error();
		}
		// hash bins (-h)
		else if( strcmp(arg, "-h") == 0 )
		{
			if( ++i < argc )
				input.hash_bins = atoi(argv[i]);
			else
				print_CLI_error();
		}
		// particles (-p)
		else if( strcmp(arg, "-p") == 0 )
		{
			if( ++i < argc )
			{
				input.particles = atoi(argv[i]);
				default_particles = 0;
			}
			else
				print_CLI_error();
		}
		// HM (-s)
		else if( strcmp(arg, "-s") == 0 )
		{
			if( ++i < argc )
			{
				// Bug fix: release the malloc'd default before aliasing
				// argv, otherwise the default buffer leaks.  The flag
				// prevents freeing an argv pointer on a repeated -s.
				if( hm_is_heap )
				{
					free( input.HM );
					hm_is_heap = 0;
				}
				input.HM = argv[i];
			}
			else
				print_CLI_error();
		}
		// grid type (-G)
		else if( strcmp(arg, "-G") == 0 )
		{
			char * grid_type;
			if( ++i < argc )
				grid_type = argv[i];
			else
				print_CLI_error();

			if( strcmp(grid_type, "unionized") == 0 )
				input.grid_type = UNIONIZED;
			else if( strcmp(grid_type, "nuclide") == 0 )
				input.grid_type = NUCLIDE;
			else if( strcmp(grid_type, "hash") == 0 )
				input.grid_type = HASH;
			else
				print_CLI_error();
		}
		// binary mode (-b)
		else if( strcmp(arg, "-b") == 0 )
		{
			char * binary_mode;
			if( ++i < argc )
				binary_mode = argv[i];
			else
				print_CLI_error();

			if( strcmp(binary_mode, "read") == 0 )
				input.binary_mode = READ;
			else if( strcmp(binary_mode, "write") == 0 )
				input.binary_mode = WRITE;
			else
				print_CLI_error();
		}
		// kernel optimization selection (-k)
		else if( strcmp(arg, "-k") == 0 )
		{
			if( ++i < argc )
			{
				input.kernel_id = atoi(argv[i]);
			}
			else
				print_CLI_error();
		}
		else
			print_CLI_error();
	}

	// Validate Input

	// Validate nthreads
	if( input.nthreads < 1 )
		print_CLI_error();

	// Validate n_isotopes
	if( input.n_isotopes < 1 )
		print_CLI_error();

	// Validate n_gridpoints
	if( input.n_gridpoints < 1 )
		print_CLI_error();

	// Validate lookups
	if( input.lookups < 1 )
		print_CLI_error();

	// Validate Hash Bins
	if( input.hash_bins < 1 )
		print_CLI_error();

	// Validate HM size
	if( strcasecmp(input.HM, "small") != 0 &&
		strcasecmp(input.HM, "large") != 0 &&
		strcasecmp(input.HM, "XL") != 0 &&
		strcasecmp(input.HM, "XXL") != 0 )
		print_CLI_error();

	// Set HM size specific parameters
	// (defaults to large)
	if( strcasecmp(input.HM, "small") == 0 )
		input.n_isotopes = 68;
	else if( strcasecmp(input.HM, "XL") == 0 && user_g == 0 )
		input.n_gridpoints = 238847; // sized to make 120 GB XS data
	else if( strcasecmp(input.HM, "XXL") == 0 && user_g == 0 )
		input.n_gridpoints = 238847 * 2.1; // 252 GB XS data

	// Return input struct
	return input;
}

// Serializes the SimulationData object and all of its heap arrays to a
// binary file.  Aborts on I/O failure so a truncated file is never left
// in place silently.
void binary_write( Inputs in, SimulationData SD )
{
	char * fname = "XS_data.dat";
	printf("Writing all data structures to binary file %s...\n", fname);
	// Bug fix: open in binary mode ("wb", matters on Windows) and check
	// for failure before writing -- fwrite on a NULL stream is UB.
	FILE * fp = fopen(fname, "wb");
	if( fp == NULL )
	{
		fprintf(stderr, "Error: unable to open %s for writing\n", fname);
		exit(1);
	}

	// Write SimulationData Object. Include pointers, even though we won't be using them.
	fwrite(&SD, sizeof(SimulationData), 1, fp);

	// Write heap arrays in SimulationData Object
	fwrite(SD.num_nucs,     sizeof(int), SD.length_num_nucs, fp);
	fwrite(SD.concs,        sizeof(double), SD.length_concs, fp);
	fwrite(SD.mats,         sizeof(int), SD.length_mats, fp);
	fwrite(SD.nuclide_grid, sizeof(NuclideGridPoint), SD.length_nuclide_grid, fp);
	fwrite(SD.index_grid,   sizeof(int), SD.length_index_grid, fp);
	fwrite(SD.unionized_energy_array, sizeof(double), SD.length_unionized_energy_array, fp);

	// fclose flushes -- a failure here means the file is incomplete.
	if( fclose(fp) != 0 )
	{
		fprintf(stderr, "Error: failed writing %s\n", fname);
		exit(1);
	}
}

// Reads 'count' items of 'size' bytes or aborts -- fread may silently
// return a short count on a truncated or corrupt file.
static void checked_fread( void * ptr, size_t size, size_t count, FILE * fp )
{
	if( fread(ptr, size, count, fp) != count )
	{
		fprintf(stderr, "Error: truncated or corrupt binary data file\n");
		exit(1);
	}
}

// Deserializes a SimulationData object previously written by
// binary_write, allocating fresh heap arrays for all fields.
SimulationData binary_read( Inputs in )
{
	SimulationData SD;

	char * fname = "XS_data.dat";
	printf("Reading all data structures from binary file %s...\n", fname);

	// Bug fix: binary mode ("rb"), and every fread is now checked.
	FILE * fp = fopen(fname, "rb");
	assert(fp != NULL);

	// Read SimulationData Object. Include pointers, even though we won't be using them.
	checked_fread(&SD, sizeof(SimulationData), 1, fp);

	// Allocate space for arrays on heap
	SD.num_nucs     = (int *) malloc(SD.length_num_nucs * sizeof(int));
	SD.concs        = (double *) malloc(SD.length_concs * sizeof(double));
	SD.mats         = (int *) malloc(SD.length_mats * sizeof(int));
	SD.nuclide_grid = (NuclideGridPoint *) malloc(SD.length_nuclide_grid * sizeof(NuclideGridPoint));
	SD.index_grid   = (int *) malloc( SD.length_index_grid * sizeof(int));
	SD.unionized_energy_array = (double *) malloc( SD.length_unionized_energy_array * sizeof(double));

	// Read heap arrays into SimulationData Object
	checked_fread(SD.num_nucs,     sizeof(int), SD.length_num_nucs, fp);
	checked_fread(SD.concs,        sizeof(double), SD.length_concs, fp);
	checked_fread(SD.mats,         sizeof(int), SD.length_mats, fp);
	checked_fread(SD.nuclide_grid, sizeof(NuclideGridPoint), SD.length_nuclide_grid, fp);
	checked_fread(SD.index_grid,   sizeof(int), SD.length_index_grid, fp);
	checked_fread(SD.unionized_energy_array, sizeof(double), SD.length_unionized_energy_array, fp);

	fclose(fp);

	return SD;
}

//Simulation.c

////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor CPU optimizations in place.
// Following these functions are a number of optimized variants,
// which each deploy a different combination of optimizations strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype) { if( mype == 0) printf("Beginning event based simulation...\n"); //////////////////////////////////////////////////////////////////////////////// // SUMMARY: Simulation Data Structure Manifest for "SD" Object // Here we list all heap arrays (and lengths) in SD that would need to be // offloaded manually if using an accelerator with a seperate memory space //////////////////////////////////////////////////////////////////////////////// // int * num_nucs; // Length = length_num_nucs; // double * concs; // Length = length_concs // int * mats; // Length = length_mats // double * unionized_energy_array; // Length = length_unionized_energy_array // int * index_grid; // Length = length_index_grid // NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid // // Note: "unionized_energy_array" and "index_grid" can be of zero length // depending on lookup method. // // Note: "Lengths" are given as the number of objects in the array, not the // number of bytes. 
//////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Begin Actual Simulation Loop //////////////////////////////////////////////////////////////////////////////// unsigned long long verification = 0; #pragma omp parallel for schedule(dynamic,#P1) reduction(+:verification) for( int i = 0; i < in.lookups; i++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation SD.num_nucs, // 1-D array with number of nuclides per material SD.concs, // Flattened 2-D array with concentration of each nuclide in each material SD.unionized_energy_array, // 1-D Unionized energy array SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) SD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find 
its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on the verification value. // For accelerators, a different approach might be required // (e.g., atomics, reduction of thread-specific values in large // array via CUDA thrust, etc). double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } verification += max_idx+1; } return verification; } unsigned long long run_history_based_simulation(Inputs in, SimulationData SD, int mype) { if( mype == 0) printf("Beginning history based simulation...\n"); //////////////////////////////////////////////////////////////////////////////// // SUMMARY: Simulation Data Structure Manifest for "SD" Object // Here we list all heap arrays (and lengths) in SD that would need to be // offloaded manually if using an accelerator with a seperate memory space //////////////////////////////////////////////////////////////////////////////// // int * num_nucs; // Length = length_num_nucs; // double * concs; // Length = length_concs // int * mats; // Length = length_mats // double * unionized_energy_array; // Length = length_unionized_energy_array // int * index_grid; // Length = length_index_grid // NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid // // Note: "unionized_energy_array" and "index_grid" can be of zero length // depending on lookup method. // // Note: "Lengths" are given as the number of objects in the array, not the // number of bytes. //////////////////////////////////////////////////////////////////////////////// unsigned long long verification = 0; // Begin outer lookup loop over particles. This loop is independent. 
#pragma omp parallel for schedule(dynamic, #P1) reduction(+:verification) for( int p = 0; p < in.particles; p++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup, and // we may fast forward up to 5 times after each lookup) seed = fast_forward_LCG(seed, p*in.lookups*2*5); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // Inner XS Lookup Loop // This loop is dependent! // i.e., Next iteration uses data computed in previous iter. for( int i = 0; i < in.lookups; i++ ) { double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation SD.num_nucs, // 1-D array with number of nuclides per material SD.concs, // Flattened 2-D array with concentration of each nuclide in each material SD.unionized_energy_array, // 1-D Unionized energy array SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation SD.mats, // Flattened 2-D array with nuclide indices for each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookups) SD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. 
In this implementation, we prevent thread
// contention by using an OMP reduction on it. For other accelerators,
// a different approach might be required (e.g., atomics, reduction
// of thread-specific values in large array via CUDA thrust, etc)
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
    if( macro_xs_vector[j] > max )
    {
        max = macro_xs_vector[j];
        max_idx = j;
    }
}
verification += max_idx+1;

// Randomly pick next energy and material for the particle
// Also incorporates results from macro_xs lookup to
// enforce loop dependency.
// In a real MC app, this dependency is expressed in terms
// of branching physics sampling, whereas here we are just
// artificially enforcing this dependence based on fast
// forwarding the LCG state
uint64_t n_forward = 0;
for( int j = 0; j < 5; j++ )
    if( macro_xs_vector[j] > 1.0 )
        n_forward++;
if( n_forward > 0 )
    seed = fast_forward_LCG(seed, n_forward);

p_energy = LCG_random_double(&seed);
mat      = pick_mat(&seed);
}
}

return verification;
}

// Calculates the microscopic cross section for a given nuclide & energy.
//
// Interpolates linearly between the two bounding grid points of the
// nuclide's energy grid and writes the 5 reaction channels
// (total, elastic, absorption, fission, nu-fission) into xs_vector[0..4].
//
// The meaning of 'idx' depends on grid_type (see branches below):
//   NUCLIDE   - unused on entry; a binary search is performed here.
//   UNIONIZED - index into the unionized energy grid (found by caller).
//   HASH      - hash bin index (computed by caller).
void calculate_micro_xs(   double p_energy, int nuc, long n_isotopes,
                           long n_gridpoints,
                           double * restrict egrid, int * restrict index_data,
                           NuclideGridPoint * restrict nuclide_grids,
                           long idx, double * restrict xs_vector, int grid_type, int hash_bins ){
    // Variables
    double f;
    NuclideGridPoint * low, * high;

    // If using only the nuclide grid, we must perform a binary search
    // to find the energy location in this particular nuclide's grid.
    if( grid_type == NUCLIDE )
    {
        // Perform binary search on the Nuclide Grid to find the index
        idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);

        // pull ptr from nuclide grid and check to ensure that
        // we're not reading off the end of the nuclide's grid
        if( idx == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + idx - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + idx];
    }
    else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
    {
        // pull ptr from energy grid and check to ensure that
        // we're not reading off the end of the nuclide's grid
        if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
    }
    else // Hash grid
    {
        // load lower bounding index
        int u_low = index_data[idx * n_isotopes + nuc];

        // Determine higher bounding index
        int u_high;
        if( idx == hash_bins - 1 )
            u_high = n_gridpoints - 1;
        else
            u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;

        // Check edge cases to make sure energy is actually between these
        // Then, if things look good, search for gridpoint in the nuclide grid
        // within the lower and higher limits we've calculated.
        double e_low  = nuclide_grids[nuc*n_gridpoints + u_low].energy;
        double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
        int lower;
        if( p_energy <= e_low )
            lower = 0;
        else if( p_energy >= e_high )
            lower = n_gridpoints - 1;
        else
            lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);

        // clamp so that 'high = low + 1' below stays inside the grid
        if( lower == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + lower];
    }

    high = low + 1;

    // calculate the re-useable interpolation factor
    f = (high->energy - p_energy) / (high->energy - low->energy);

    // Total XS
    xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);

    // Elastic XS
    xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);

    // Absorbtion XS
    xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);

    // Fission XS
    xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);

    // Nu Fission XS
    xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}

// Calculates macroscopic cross section based on a given material & energy.
//
// Accumulates conc-weighted microscopic XS over every nuclide in material
// 'mat' into macro_xs_vector[0..4] (zeroed here first).  For UNIONIZED
// grids, the single binary search on the unionized energy grid is done
// once here; for HASH, the bin index is computed here; for NUCLIDE, the
// per-nuclide search happens inside calculate_micro_xs.
void calculate_macro_xs( double p_energy, int mat, long n_isotopes, long n_gridpoints, int * restrict num_nucs, double * restrict concs, double * restrict egrid, int * restrict index_data, NuclideGridPoint * restrict nuclide_grids, int * restrict mats, double * restrict macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
    int p_nuc; // the nuclide we are looking up
    long idx = -1;
    double conc; // the concentration of the nuclide in the material

    // cleans out macro_xs_vector
    for( int k = 0; k < 5; k++ )
        macro_xs_vector[k] = 0;

    // If we are using the unionized energy grid (UEG), we only
    // need to perform 1 binary search per macroscopic lookup.
    // If we are using the nuclide grid search, it will have to be
    // done inside of the "calculate_micro_xs" function for each different
    // nuclide in the material.
    if( grid_type == UNIONIZED )
        idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
    else if( grid_type == HASH )
    {
        // NOTE: assumes p_energy is in [0,1), so idx lands in [0, hash_bins)
        double du = 1.0 / hash_bins;
        idx = p_energy / du;
    }

    // Once we find the pointer array on the UEG, we can pull the data
    // from the respective nuclide grids, as well as the nuclide
    // concentration data for the material
    // Each nuclide from the material needs to have its micro-XS array
    // looked up & interpolated (via calculate_micro_xs). Then, the
    // micro XS is multiplied by the concentration of that nuclide
    // in the material, and added to the total macro XS array.
    // (Independent -- though if parallelizing, must use atomic operations
    // or otherwise control access to the xs_vector and macro_xs_vector to
    // avoid simultaneous writing to the same data structure)
    for( int j = 0; j < num_nucs[mat]; j++ )
    {
        double xs_vector[5];
        p_nuc = mats[mat*max_num_nucs + j];
        conc = concs[mat*max_num_nucs + j];
        calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins );
        for( int k = 0; k < 5; k++ )
            macro_xs_vector[k] += xs_vector[k] * conc;
    }
}

// binary search for energy on unionized energy grid
// returns lower index (the greatest index whose energy is <= quarry,
// assuming A is sorted ascending; result is in [0, n-2])
long grid_search( long n, double quarry, double * restrict A)
{
    long lowerLimit = 0;
    long upperLimit = n-1;
    long examinationPoint;
    long length = upperLimit - lowerLimit;

    while( length > 1 )
    {
        examinationPoint = lowerLimit + ( length / 2 );

        if( A[examinationPoint] > quarry )
            upperLimit = examinationPoint;
        else
            lowerLimit = examinationPoint;

        length = upperLimit - lowerLimit;
    }

    return lowerLimit;
}

// binary search for energy on nuclide energy grid
// same contract as grid_search, but over NuclideGridPoint.energy and
// restricted to the index window [low, high]
long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
    long lowerLimit = low;
    long upperLimit = high;
    long examinationPoint;
    long length = upperLimit - lowerLimit;

    while( length > 1 )
    {
        examinationPoint = lowerLimit + ( length / 2 );

        if( A[examinationPoint].energy > quarry )
            upperLimit = examinationPoint;
        else
            lowerLimit = examinationPoint;

        length = upperLimit - lowerLimit;
    }

    return lowerLimit;
}

// picks a material based on a probabilistic distribution
// (returns a material index in [0, 11]; advances *seed via the LCG)
int pick_mat( uint64_t * seed )
{
    // I have a nice spreadsheet supporting these numbers. They are
    // the fractions (by volume) of material in the core. Not a
    // *perfect* approximation of where XS lookups are going to occur,
    // but this will do a good job of biasing the system nonetheless.
    double dist[12];
    dist[0]  = 0.140; // fuel
    dist[1]  = 0.052; // cladding
    dist[2]  = 0.275; // cold, borated water
    dist[3]  = 0.134; // hot, borated water
    dist[4]  = 0.154; // RPV
    dist[5]  = 0.064; // Lower, radial reflector
    dist[6]  = 0.066; // Upper reflector / top plate
    dist[7]  = 0.055; // bottom plate
    dist[8]  = 0.008; // bottom nozzle
    dist[9]  = 0.015; // top nozzle
    dist[10] = 0.025; // top of fuel assemblies
    dist[11] = 0.013; // bottom of fuel assemblies

    double roll = LCG_random_double(seed);

    // makes a pick based on the distro
    // NOTE(review): the running total sums dist[j] for j = i down to 1,
    // so dist[0] is never included in any running sum -- looks intentional
    // as a biasing quirk, but confirm against the reference implementation.
    for( int i = 0; i < 12; i++ )
    {
        double running = 0;
        for( int j = i; j > 0; j-- )
            running += dist[j];
        if( roll < running )
            return i;
    }

    return 0;
}

// Returns a uniform double in [0,1) and advances *seed.
// The modulus m = 2^63, so '% m' masks the state to 63 bits.
double LCG_random_double(uint64_t * seed)
{
    // LCG parameters
    const uint64_t m = 9223372036854775808ULL; // 2^63
    const uint64_t a = 2806196910506780709ULL;
    const uint64_t c = 1ULL;
    *seed = (a * (*seed) + c) % m;
    return (double) (*seed) / (double) m;
}

// Advances the LCG state by n steps in O(log n) using the standard
// square-and-multiply composition of affine maps:
//   a_new, c_new accumulate the combined multiplier/increment so that
//   result = a^n * seed + (a^(n-1) + ... + 1) * c   (mod m = 2^63)
uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
    // LCG parameters
    const uint64_t m = 9223372036854775808ULL; // 2^63
    uint64_t a = 2806196910506780709ULL;
    uint64_t c = 1ULL;

    n = n % m;

    uint64_t a_new = 1;
    uint64_t c_new = 0;

    while(n > 0)
    {
        if(n & 1)
        {
            a_new *= a;
            c_new = c_new * a + c;
        }
        // square the base map: (a,c) -> (a*a, c*(a+1))
        c *= (a + 1);
        a *= a;

        n >>= 1;
    }

    return (a_new * seed + c_new) % m;
}

////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, which each deploy a different combination of optimization strategies.
// By default, XSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
//
// As fast parallel sorting will be required for these optimizations, we will
// first define a set of key-value parallel quicksort routines.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Parallel Quicksort Key-Value Sorting Algorithms
////////////////////////////////////////////////////////////////////////////////////
//
// These algorithms are based on the parallel quicksort implementation by
// Eduard Lopez published at https://github.com/eduardlopez/quicksort-parallel
//
// Eduard's original version was for an integer type quicksort, but I have modified
// it to form two different versions that can sort key-value pairs together without
// having to bundle them into a separate object. Additionally, I have modified the
// optimal chunk sizes and restricted the number of threads for the array sizing
// that XSBench will be using by default.
// // Eduard's original implementation carries the following license, which applies to // the following functions only: // // void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff) // void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads) // void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff) // void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads) // // The MIT License (MIT) // // Copyright (c) 2016 Eduard López // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
// ////////////////////////////////////////////////////////////////////////////////////

// Recursive partition step for the (int key, double value) pair sort.
// Sorts key[left..right] ascending, permuting value[] identically.
// Sub-ranges smaller than 'cutoff' recurse serially; larger ones are
// spawned as OpenMP tasks (caller must provide the parallel region).
void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
{
    int i = left, j = right;
    int tmp;
    // pivot is the middle element's key (by value, not index)
    int pivot = key[(left + right) / 2];

    {
        // Hoare-style partition: swap key AND value together so the
        // pairing between the two arrays is preserved.
        while (i <= j)
        {
            while (key[i] < pivot)
                i++;
            while (key[j] > pivot)
                j--;
            if (i <= j)
            {
                tmp = key[i];
                key[i] = key[j];
                key[j] = tmp;
                double tmp_v = value[i];
                value[i] = value[j];
                value[j] = tmp_v;
                i++;
                j--;
            }
        }
    }

    if ( ((right-left)<cutoff) ){
        if (left < j){ quickSort_parallel_internal_i_d(key, value, left, j, cutoff); }
        if (i < right){ quickSort_parallel_internal_i_d(key, value, i, right, cutoff); }
    }else{
        #pragma omp task
        { quickSort_parallel_internal_i_d(key, value, left, j, cutoff); }
        #pragma omp task
        { quickSort_parallel_internal_i_d(key, value, i, right, cutoff); }
    }
}

// Entry point: parallel key-value quicksort (int keys, double values).
// Spawns an OpenMP parallel region; a single thread seeds the recursion
// and further parallelism comes from the task constructs above.
void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads){
    // Set minimum problem size to still spawn threads for
    int cutoff = 10000;

    // For this problem size, more than 16 threads on CPU is not helpful
    if( numThreads > 16 )
        numThreads = 16;

    #pragma omp parallel num_threads(numThreads)
    {
        #pragma omp single nowait
        {
            quickSort_parallel_internal_i_d(key,value, 0, lenArray-1, cutoff);
        }
    }
}

// Recursive partition step for the (double key, int value) pair sort.
// Mirror image of quickSort_parallel_internal_i_d with key/value types swapped.
void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
{
    int i = left, j = right;
    double tmp;
    double pivot = key[(left + right) / 2];

    {
        while (i <= j)
        {
            while (key[i] < pivot)
                i++;
            while (key[j] > pivot)
                j--;
            if (i <= j)
            {
                tmp = key[i];
                key[i] = key[j];
                key[j] = tmp;
                int tmp_v = value[i];
                value[i] = value[j];
                value[j] = tmp_v;
                i++;
                j--;
            }
        }
    }

    if ( ((right-left)<cutoff) ){
        if (left < j){ quickSort_parallel_internal_d_i(key, value, left, j, cutoff); }
        if (i < right){ quickSort_parallel_internal_d_i(key, value, i, right, cutoff); }
    }else{
        #pragma omp task
        { quickSort_parallel_internal_d_i(key, value, left, j, cutoff); }
        #pragma omp task
        { quickSort_parallel_internal_d_i(key, value, i, right, cutoff);
        }
    }
}

// Entry point: parallel key-value quicksort (double keys, int values).
void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads){
    // Set minimum problem size to still spawn threads for
    int cutoff = 10000;

    // For this problem size, more than 16 threads on CPU is not helpful
    if( numThreads > 16 )
        numThreads = 16;

    #pragma omp parallel num_threads(numThreads)
    {
        #pragma omp single nowait
        {
            quickSort_parallel_internal_d_i(key,value, 0, lenArray-1, cutoff);
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 -- Event-based Sample/XS Lookup kernel splitting + Sorting
//                   lookups by material and energy
////////////////////////////////////////////////////////////////////////////////////
// This kernel separates out the sampling and lookup regions of the event-based
// model, and then sorts the lookups by material type and energy. The goal of this
// optimization is to allow for greatly improved cache locality, and XS indices
// loaded from memory may be re-used for multiple lookups.
//
// As efficient sorting is key for performance, we also must implement an
// efficient key-value parallel sorting algorithm. We also experimented with using
// the C++ version of thrust for these purposes, but found that our own implementation
// was slightly faster than the thrust library version, so for speed and
// simplicity we do not add the thrust dependency.
////////////////////////////////////////////////////////////////////////////////////

// Optimization 1 kernel: sample all (energy, material) pairs up front,
// sort them by material then by energy, and only then perform the XS
// lookups material-by-material. Returns the verification checksum
// (sum of 1-based max-channel indices over all lookups).
//
// NOTE(review): the tokens #P1 / #P2 below (and #P3..#P5 in grid_init)
// appear to be autotuning-template placeholders substituted before
// compilation -- this file is not compilable as-is; confirm with the
// template/tuning tooling.
unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData SD, int mype)
{
    char * optimization_name = "Optimization 1 - Kernel splitting + full material & energy sort";

    if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);

    ////////////////////////////////////////////////////////////////////////////////
    // Allocate Additional Data Structures Needed by Optimized Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Allocating additional data required by optimized kernel...\n");
    size_t sz;
    size_t total_sz = 0;
    double start, stop;

    // one pre-sampled energy per lookup
    sz = in.lookups * sizeof(double);
    SD.p_energy_samples = (double *) malloc(sz);
    total_sz += sz;
    SD.length_p_energy_samples = in.lookups;

    // one pre-sampled material index per lookup
    sz = in.lookups * sizeof(int);
    SD.mat_samples = (int *) malloc(sz);
    total_sz += sz;
    SD.length_mat_samples = in.lookups;

    // NOTE(review): message says "on GPU" but these are host mallocs --
    // wording likely inherited from an accelerator port of this kernel.
    if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

    ////////////////////////////////////////////////////////////////////////////////
    // Begin Actual Simulation
    ////////////////////////////////////////////////////////////////////////////////

    ////////////////////////////////////////////////////////////////////////////////
    // Sample Materials and Energies
    ////////////////////////////////////////////////////////////////////////////////
    #pragma omp parallel for schedule(dynamic, #P1)
    for( int i = 0; i < in.lookups; i++ )
    {
        // Set the initial seed value
        uint64_t seed = STARTING_SEED;

        // Forward seed to lookup index (we need 2 samples per lookup)
        seed = fast_forward_LCG(seed, 2*i);

        // Randomly pick an energy and material for the particle
        double p_energy = LCG_random_double(&seed);
        int mat         = pick_mat(&seed);

        SD.p_energy_samples[i] = p_energy;
        SD.mat_samples[i] = mat;
    }
    if(mype == 0) printf("finished sampling...\n");

    ////////////////////////////////////////////////////////////////////////////////
    // Sort by Material
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();

    quickSort_parallel_i_d(SD.mat_samples, SD.p_energy_samples, in.lookups, in.nthreads);

    stop = get_time();

    if(mype == 0) printf("Material sort took %.3lf seconds\n", stop-start);

    ////////////////////////////////////////////////////////////////////////////////
    // Sort by Energy
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();

    // Count up number of each type of sample.
    int num_samples_per_mat[12] = {0};
    for( int l = 0; l < in.lookups; l++ )
        num_samples_per_mat[ SD.mat_samples[l] ]++;

    // Determine offsets (exclusive prefix sum of the per-material counts)
    int offsets[12] = {0};
    for( int m = 1; m < 12; m++ )
        offsets[m] = offsets[m-1] + num_samples_per_mat[m-1];

    stop = get_time();
    if(mype == 0) printf("Counting samples and offsets took %.3lf seconds\n", stop-start);
    start = stop;

    // Sort each material type by energy level
    // (note: 'offset' is unused in this loop; offsets[m] supplies the base)
    int offset = 0;
    for( int m = 0; m < 12; m++ )
        quickSort_parallel_d_i(SD.p_energy_samples + offsets[m],SD.mat_samples + offsets[m], num_samples_per_mat[m], in.nthreads);

    stop = get_time();
    if(mype == 0) printf("Energy Sorts took %.3lf seconds\n", stop-start);

    ////////////////////////////////////////////////////////////////////////////////
    // Perform lookups for each material separately
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();

    unsigned long long verification = 0;

    // Individual Materials
    offset = 0;

    #P2
    for( int m = 0; m < 12; m++ )
    {
        #pragma omp parallel for schedule(dynamic,#P1) reduction(+:verification)
        for( int i = offset; i < offset + num_samples_per_mat[m]; i++)
        {
            // load pre-sampled energy and material for the particle
            double p_energy = SD.p_energy_samples[i];
            int mat         = SD.mat_samples[i];

            double macro_xs_vector[5] = {0};

            // Perform macroscopic Cross Section Lookup
            calculate_macro_xs(
                    p_energy,        // Sampled neutron energy (in lethargy)
                    mat,             // Sampled material type index neutron is in
                    in.n_isotopes,   // Total number of isotopes in simulation
                    in.n_gridpoints, // Number of gridpoints per isotope in simulation
                    SD.num_nucs,     // 1-D array with number of nuclides per material
                    SD.concs,        // Flattened 2-D array with concentration of each nuclide in each material
                    SD.unionized_energy_array, // 1-D Unionized energy array
                    SD.index_grid,   // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
                    SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
                    SD.mats,         // Flattened 2-D array with nuclide indices defining composition of each type of material
                    macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
                    in.grid_type,    // Lookup type (nuclide, hash, or unionized)
                    in.hash_bins,    // Number of hash bins used (if using hash lookup type)
                    SD.max_num_nucs  // Maximum number of nuclides present in any material
                    );

            // For verification, and to prevent the compiler from optimizing
            // all work out, we interrogate the returned macro_xs_vector array
            // to find its maximum value index, then increment the verification
            // value by that index. In this implementation, we prevent thread
            // contention by using an OMP reduction on the verification value.
            // For accelerators, a different approach might be required
            // (e.g., atomics, reduction of thread-specific values in large
            // array via CUDA thrust, etc).
            double max = -1.0;
            int max_idx = 0;
            for(int j = 0; j < 5; j++ )
            {
                if( macro_xs_vector[j] > max )
                {
                    max = macro_xs_vector[j];
                    max_idx = j;
                }
            }
            verification += max_idx+1;
        }
        offset += num_samples_per_mat[m];
    }

    stop = get_time();
    if(mype == 0) printf("XS Lookups took %.3lf seconds\n", stop-start);

    return verification;
}

//GridInit.c

// Allocates and initializes all simulation data: the randomized nuclide
// grids (sorted ascending in energy per nuclide), the chosen acceleration
// structure (unionized grid, hash grid, or none), and the material
// composition/concentration tables. Returns the populated SimulationData
// by value; all arrays inside it are heap-allocated and owned by the caller.
SimulationData grid_init_do_not_profile( Inputs in, int mype )
{
    // Structure to hold all allocated simulation data arrays
    SimulationData SD;

    // Keep track of how much data we're allocating
    size_t nbytes = 0;

    // Set the initial seed value
    uint64_t seed = 42;

    ////////////////////////////////////////////////////////////////////
    // Initialize Nuclide Grids
    ////////////////////////////////////////////////////////////////////
    if(mype == 0) printf("Intializing nuclide grids...\n");

    // First, we need to initialize our nuclide grid. This comes in the form
    // of a flattened 2D array that hold all the information we need to define
    // the cross sections for all isotopes in the simulation.
    // The grid is composed of "NuclideGridPoint" structures, which hold the
    // energy level of the grid point and all associated XS data at that level.
    // An array of structures (AOS) is used instead of
    // a structure of arrays, as the grid points themselves are accessed in
    // a random order, but all cross section interaction channels and the
    // energy level are read whenever the gridpoint is accessed, meaning the
    // AOS is more cache efficient.

    // Initialize Nuclide Grid
    SD.length_nuclide_grid = in.n_isotopes * in.n_gridpoints;
    SD.nuclide_grid = (NuclideGridPoint *) malloc( SD.length_nuclide_grid * sizeof(NuclideGridPoint));
    assert(SD.nuclide_grid != NULL);
    nbytes += SD.length_nuclide_grid * sizeof(NuclideGridPoint);
    for( int i = 0; i < SD.length_nuclide_grid; i++ )
    {
        SD.nuclide_grid[i].energy        = LCG_random_double(&seed);
        SD.nuclide_grid[i].total_xs      = LCG_random_double(&seed);
        SD.nuclide_grid[i].elastic_xs    = LCG_random_double(&seed);
        SD.nuclide_grid[i].absorbtion_xs = LCG_random_double(&seed);
        SD.nuclide_grid[i].fission_xs    = LCG_random_double(&seed);
        SD.nuclide_grid[i].nu_fission_xs = LCG_random_double(&seed);
    }

    // Sort so that each nuclide has data stored in ascending energy order.
    #P3
    for( int i = 0; i < in.n_isotopes; i++ )
        qsort( &SD.nuclide_grid[i*in.n_gridpoints], in.n_gridpoints, sizeof(NuclideGridPoint), NGP_compare);

    // error debug check
    /*
    #P3
    for( int i = 0; i < in.n_isotopes; i++ )
    {
        printf("NUCLIDE %d ==============================\n", i);
        for( int j = 0; j < in.n_gridpoints; j++ )
            printf("E%d = %lf\n", j, SD.nuclide_grid[i * in.n_gridpoints + j].energy);
    }
    */

    ////////////////////////////////////////////////////////////////////
    // Initialize Acceleration Structure
    ////////////////////////////////////////////////////////////////////

    if( in.grid_type == NUCLIDE )
    {
        // no acceleration structure: per-nuclide binary searches at lookup time
        SD.length_unionized_energy_array = 0;
        SD.length_index_grid = 0;
    }

    if( in.grid_type == UNIONIZED )
    {
        if(mype == 0) printf("Intializing unionized grid...\n");

        // Allocate space to hold the union of all nuclide energy data
        SD.length_unionized_energy_array = in.n_isotopes * in.n_gridpoints;
        SD.unionized_energy_array = (double *) malloc( SD.length_unionized_energy_array * sizeof(double));
        assert(SD.unionized_energy_array != NULL );
        nbytes += SD.length_unionized_energy_array * sizeof(double);

        // Copy energy data over from the nuclide energy grid
        #P3
        for( int i = 0; i < SD.length_unionized_energy_array; i++ )
            SD.unionized_energy_array[i] = SD.nuclide_grid[i].energy;

        // Sort unionized energy array
        qsort( SD.unionized_energy_array, SD.length_unionized_energy_array, sizeof(double), double_compare);

        // Allocate space to hold the acceleration grid indices
        SD.length_index_grid = SD.length_unionized_energy_array * in.n_isotopes;
        SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int));
        assert(SD.index_grid != NULL);
        nbytes += SD.length_index_grid * sizeof(int);

        // Generates the double indexing grid: for each unionized energy level
        // and each isotope, record the lower bounding index into that
        // isotope's own grid. idx_low/energy_high are per-isotope cursors
        // advanced as we sweep the unionized energies in ascending order.
        int * idx_low = (int *) calloc( in.n_isotopes, sizeof(int));
        assert(idx_low != NULL );
        double * energy_high = (double *) malloc( in.n_isotopes * sizeof(double));
        assert(energy_high != NULL );

        #P3
        for( int i = 0; i < in.n_isotopes; i++ )
            energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + 1].energy;

        #pragma clang loop(e,i) tile sizes(#P4,#P5)
        #pragma clang loop id(e)
        for( long e = 0; e < SD.length_unionized_energy_array; e++ )
        {
            #pragma clang loop id(i)
            for( long i = 0; i < in.n_isotopes; i++ )
            {
                double unionized_energy = SD.unionized_energy_array[e];
                if( unionized_energy < energy_high[i] )
                    SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
                else if( idx_low[i] == in.n_gridpoints - 2 ) // cursor pinned at last valid lower index
                    SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
                else
                {
                    idx_low[i]++;
                    SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
                    energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + idx_low[i] + 1].energy;
                }
            }
        }

        free(idx_low);
        free(energy_high);
    }

    if( in.grid_type == HASH )
    {
        if(mype == 0) printf("Intializing hash grid...\n");
        SD.length_unionized_energy_array = 0;
        SD.length_index_grid = in.hash_bins * in.n_isotopes;
        SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int));
        assert(SD.index_grid != NULL);
        nbytes += SD.length_index_grid * sizeof(int);

        double du = 1.0 / in.hash_bins;

        // For each energy level in the hash table
        #pragma omp parallel for
        for( long e = 0; e < in.hash_bins; e++ )
        {
            double energy = e * du;

            // We need to determine the bounding energy levels for all isotopes
            for( long i = 0; i < in.n_isotopes; i++ )
            {
                SD.index_grid[e * in.n_isotopes + i] = grid_search_nuclide( in.n_gridpoints, energy, SD.nuclide_grid + i * in.n_gridpoints, 0, in.n_gridpoints-1);
            }
        }
    }

    ////////////////////////////////////////////////////////////////////
    // Initialize Materials and Concentrations
    ////////////////////////////////////////////////////////////////////
    if(mype == 0) printf("Intializing material data...\n");

    // Set the number of nuclides in each material
    SD.num_nucs = load_num_nucs(in.n_isotopes);
    SD.length_num_nucs = 12; // There are always 12 materials in XSBench

    // Initialize the flattened 2D grid of material data. The grid holds
    // a list of nuclide indices for each of the 12 material types. The
    // grid is allocated as a full square grid, even though not all
    // materials have the same number of nuclides.
    SD.mats = load_mats(SD.num_nucs, in.n_isotopes, &SD.max_num_nucs);
    SD.length_mats = SD.length_num_nucs * SD.max_num_nucs;

    // Initialize the flattened 2D grid of nuclide concentration data. The grid holds
    // a list of nuclide concentrations for each of the 12 material types. The
    // grid is allocated as a full square grid, even though not all
    // materials have the same number of nuclides.
    SD.concs = load_concs(SD.num_nucs, SD.max_num_nucs);
    SD.length_concs = SD.length_mats;

    if(mype == 0) printf("Intialization complete. Allocated %.0lf MB of data.\n", nbytes/1024.0/1024.0 );

    return SD;
}
// GB_binop__minus_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_fp32)
// A*D function (colscale):         GB (_AxD__minus_fp32)
// D*A function (rowscale):         GB (_DxB__minus_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_fp32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_fp32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_fp32)
// C=scalar+B                       GB (_bind1st__minus_fp32)
// C=scalar+B'                      GB (_bind1st_tran__minus_fp32)
// C=A+scalar                       GB (_bind2nd__minus_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__minus_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = (aij - bij)

// The macros below are consumed by the generic templates included further
// down; they specialize each template to the MINUS operator on FP32.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense (no mask, no typecast);
// the numeric work comes from the included template specialized by the
// GB_* macros defined above.
void GB (_Cdense_ewise3_accum__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// signalling the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); present
    // in the auto-generated source, kept as-is.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__ainv_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_uint64
// op(A') function:  GB_tran__ainv_int16_uint64

// C type:   int16_t
// A type:   uint64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse)
#define GB_OP(z, x) \
    z = -x ;

// casting (uint64_t narrowed to int16_t before negation)
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_int16_uint64
(
    int16_t *Cx,        // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_uint32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_uint64
// op(A') function:  GB_tran__minv_uint32_uint64

// C type:   uint32_t
// A type:   uint64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (unsigned multiplicative inverse, 32-bit)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting (uint64_t narrowed to uint32_t before the inverse)
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint32_uint64
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__exp_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__exp_fp64_fp64
// op(A') function:  GB_unop_tran__exp_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = exp (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = exp (x) ;

// casting (identity cast: A and C are both double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = exp (z) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__exp_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = aij ;
        Cx [p] = exp (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__exp_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_bool_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_bool_uint64
// op(A') function:  GB_tran__identity_bool_uint64

// C type:   bool
// A type:   uint64_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity; the typecast does the work)
#define GB_OP(z, x) \
    z = x ;

// casting (any nonzero uint64_t becomes true)
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_bool_uint64
(
    bool *Cx,           // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_bool_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bd_omp_myc.c
#include <stdlib.h> #include <stdio.h> #include <unistd.h> // access #include <math.h> #include <assert.h> #include "timer.h" #include "bd.h" #include <omp.h> #include <mkl.h> #include "matrix.h" #define NTHREADS 1 #define M_PI 3.14159265358979323846 #define my_EPS 0.000000001 void print_matrix(double *a, int n){ for(int i=0;i<5;i++){ for(int j=0;j<n;j++){ printf("%lf ", a[i*n+j]); } printf("\n\n"); } return; } void print_array(double *a, int n){ for(int i=0;i<n;i++){ printf("%lf ", a[i]); } printf("\n"); return; } //********************************CHOLESKY***************************************** void show_matrix(double *A, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) printf("%2.5f ", A[i * n + j]); printf("\n"); } } double * cholOMP(double * L, int n) { int i, j, k; omp_lock_t writelock; omp_init_lock(&writelock); for (int j = 0; j < n; j++) { for (int i = 0; i < j; i++){ L[i*n+j] = 0; } #pragma omp parallel for shared(L) private(k) for (k = 0; k < i; k++) { omp_set_lock(&writelock); L[j*n+j] = L[j*n+j] - L[j*n+k] * L[j*n+k]; //Critical section. omp_unset_lock(&writelock); } #pragma omp single L[i*n+i] = sqrt(L[j*n+j]); #pragma omp parallel for shared(L) private(i, k) for (i = j+1; i < n; i++) { for (k = 0; k < j; k++) { L[i*n+j] = L[i*n+j] - L[i*n+k] * L[j*n+k]; } L[i*n+j] = L[i*n+j] / L[j*n+j]; } omp_destroy_lock(&writelock); } return L; } double mul_sum(double *x, double *y, int n) { double s = 0; for (int i = 0; i < n; i++) { s += x[i]*y[i]; } return s; } //Return the transpose of a square matrix. 
void my_transpose(double *A, int n){// A = n*n int temp; for(int i = 0 ; i < n ; i++){ for (int j = i+1; j < n ; j++){ temp = A[i*n+j]; A[i*n+j] = A[j*n+i]; A[j*n+i] = temp; } } return ; } double *cho2(double *X, int n) { double *L_c = (double*)calloc(n*n, sizeof(double)); for (int j = 0; j <n; j++) { double s = mul_sum(&L_c[j * n], &L_c[j * n], j); // using the inner product and updating the matrix values L_c[j * n + j] = sqrt(X[j * n + j] - s); #pragma omp parallel for schedule(static) for (int i = j+1; i <n; i++) {// parallelizing the inner loop double s = mul_sum(&L_c[j * n], &L_c[i * n], j); L_c[i * n + j] = (X[i*n+j]-s)/ L_c[j * n + j] ; } } my_transpose(L_c, n); return L_c; } //****************************** RPY_EWALD part ***************************************************** inline void scalar_rpy_ewald_real(double r, double xi, double a3, double *m11, double *m12) { double a = 1.; double xi2 = xi*xi; double xi3 = xi2*xi; double xi5 = xi3*xi2; double xi7 = xi5*xi2; double r2 = r*r; double r4 = r2*r2; double ri = 1./r; double ri2 = ri*ri; double ri3 = ri*ri2; double erfc_xi_r = erfc(xi*r); double pi_exp = 1./sqrt(M_PI) * exp(-xi2*r2); *m11 = (0.75*a*ri + 0.5*a3*ri3)*erfc_xi_r + ( 4*xi7*a3*r4 + 3*xi3*a*r2 - 20*xi5*a3*r2 - 4.5*xi*a + 14*xi3*a3 + xi*a3*ri2)*pi_exp; *m12 = (0.75*a*ri - 1.5*a3*ri3)*erfc_xi_r + (-4*xi7*a3*r4 - 3*xi3*a*r2 + 16*xi5*a3*r2 + 1.5*xi*a - 2*xi3*a3 - 3*xi*a3*ri2)*pi_exp; } inline void scalar_rpy_ewald_recip(double k, double xi, double *m2) { double a = 1.; double a3 = 1.; double k2 = k*k; double xii2k2 = k2/(xi*xi); *m2 = (1. 
+ 0.25*xii2k2 + 0.125*xii2k2*xii2k2) * 6.*M_PI/k2 * exp(-0.25*xii2k2); } // note: positions must be wrapped inside the box [0,L] int rpy_ewald(int np, double * restrict a, const double * restrict pos, double L, const double * restrict rad, double xi, int nr, int nk) { // printf("Inside function rpy_ewald\n"); __declspec(align(64)) double rvec[8]; __declspec(align(64)) double rvec0[8]; __declspec(align(64)) double temp[8]; double a3; double m11, m12, m2; double eye3_coef; double r2, r; int x, y, z; int i, j; double *ap0, *ap; int vsize = ((2*nk+1)*(2*nk+1)*(2*nk+1) - 1) / 2; #define VSIZE ((2*6+1)*(2*6+1)*(2*6+1) - 1) / 2 // int A_VSIZE = ceil(VSIZE/8.0)*8; // int K_VSIZE = ceil(3*VSIZE/8.0)*8; // printf("check vsize=%d\n", A_VSIZE); __declspec(align(64)) double k_array[VSIZE];//1104 __declspec(align(64)) double m2_array[VSIZE];//1104 __declspec(align(64)) double kvec_array[3*VSIZE];//3296 int ind; __declspec(align(64)) double kvec[8]; double k; double t; double vinv = 1./(L*L*L); double time0, time1; double time0_real, time1_real; double time0_recip, time1_recip; // INDICES for converting for loops int _b, _index, ib, ib2; // ************************************************************************* // // compute and save coefficients for reciprocal-space sum // // Due to symmetry, only need half of the grid points ind = 0; _b = (2*nk+1); for (_index =0 ;_index < (_b*_b*_b -1)/2; _index++){// Using indices x,y,z are recalculated z = _index%(_b)-nk;// adjusting the indices x = (_index-_index%(_b*_b))/(_b*_b)-nk; y = (_index%(_b*_b)-_index%(_b))/_b-nk; k_array[ind] = 2.*M_PI/L*sqrt((double)(x*x + y*y + z*z)); scalar_rpy_ewald_recip(k_array[ind], xi, &m2_array[ind]); kvec_array[3*ind ] = 2.*M_PI/L*x; kvec_array[3*ind+1] = 2.*M_PI/L*y; kvec_array[3*ind+2] = 2.*M_PI/L*z; ind++; } #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, ap, ap0, _b, temp, eye3_coef, _index, rvec0, rvec, x, y, z, r, r2, m11, m12, a3 ) for (int _index1 = np*(np-1)/2-1; 
_index1>=0; _index1--){ i = np-1-(int)((1+sqrt(8*_index1+1))/2); j = np-1-_index1 + (int)((1+sqrt(8*_index1+1))/2)*((int)((1+sqrt(8*_index1+1))/2)-1)/2; temp[0] = 0.; temp[1] = 0.; temp[3] = 0.; temp[2] = 0.; temp[4] = 0.; temp[5] = 0.; eye3_coef = 0.; rvec0[0] = pos[3*i] - pos[3*j]; rvec0[1] = pos[3*i+1] - pos[3*j+1]; rvec0[2] = pos[3*i+2] - pos[3*j+2]; a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]); _b = (2*nr+1); //shared(eye3_coef, temp, rvec0, L, xi, a3, m11, m12, _b, xi3, xi5, xi7, xi) //// #pragma omp parallel for schedule(static) private(rvec, x, y, z, r, r2, m11, m12) shared(eye3_coef, temp, rvec0, a3) for (_index =0 ;_index < _b*_b*_b; _index++){ z =_index%(_b)-nr;// adjusting the indices x = (_index-_index%(_b*_b))/(_b*_b)-nr; y = (_index%(_b*_b)-_index%(_b))/_b-nr; rvec[0] = rvec0[0] + x*L; rvec[1] = rvec0[1] + y*L; rvec[2] = rvec0[2] + z*L; // compute norm r2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2]; r = sqrt(r2); rvec[0] /= r; rvec[1] /= r; rvec[2] /= r; scalar_rpy_ewald_real(r, xi, a3, &m11, &m12); eye3_coef += m11; temp[0] += m12 * rvec[0] * rvec[0]; temp[1] += m12 * rvec[0] * rvec[1]; temp[2] += m12 * rvec[0] * rvec[2]; temp[3] += m12 * rvec[1] * rvec[1]; temp[4] += m12 * rvec[1] * rvec[2]; temp[5] += m12 * rvec[2] * rvec[2]; } // add contribution to eye3 term temp[0] += eye3_coef; temp[3] += eye3_coef; temp[5] += eye3_coef; // sum into global matrix (only lower-triangular part) // // Use matlab to add transpose ap0 = &a[np*3*3*i + 3*j]; ap = ap0; *ap++ = temp[0]; *ap++ = temp[1]; *ap = temp[2]; ap = ap0+np*3; *ap++ = temp[1]; *ap++ = temp[3]; *ap = temp[4]; ap = ap0+np*3+np*3; *ap++ = temp[2]; *ap++ = temp[4]; *ap = temp[5]; } // reciprocal-space sum #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, temp, ap, ap0, ind, rvec, kvec, k, m2, t, a3) for (_index = np*(np+1)/2-1; _index>=0; _index--){ i = np-1-(int)((-1+sqrt(8*_index+1))/2); j = np-1-_index + 
(int)((-1+sqrt(8*_index+1))/2)*((int)((-1+sqrt(8*_index+1))/2)+1)/2; rvec[0] = pos[3*i+0] - pos[3*j]; rvec[1] = pos[3*i+1] - pos[3*j+1]; rvec[2] = pos[3*i+2] - pos[3*j+2]; temp[0] = 0.; temp[1] = 0.; temp[3] = 0.; temp[2] = 0.; temp[4] = 0.; temp[5] = 0.; a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]); for (ind=0; ind<vsize; ind++) { k = k_array[ind]; m2 = m2_array[ind]; kvec[0] = kvec_array[3*ind ]; kvec[1] = kvec_array[3*ind+1]; kvec[2] = kvec_array[3*ind+2]; t = 2.*vinv*m2*cos(kvec[0]*rvec[0] + kvec[1]*rvec[1] + kvec[2]*rvec[2])*(1.-a3*k*k/3.); kvec[0] /= k; kvec[1] /= k; kvec[2] /= k; temp[0] += t * (1. - kvec[0]*kvec[0]); temp[1] += t * - kvec[0]*kvec[1]; temp[2] += t * - kvec[0]*kvec[2]; temp[3] += t * (1. - kvec[1]*kvec[1]); temp[4] += t * - kvec[1]*kvec[2]; temp[5] += t * (1. - kvec[2]*kvec[2]); } // sum into matrix // // sum with existing values ap0 = &a[np*3*3*i + 3*j]; ap = ap0; *ap++ += temp[0]; *ap++ += temp[1]; *ap += temp[2]; ap = ap0+np*3; *ap++ += temp[1]; *ap++ += temp[3];// diagonal element *ap += temp[4]; ap = ap0+np*3+np*3; *ap++ += temp[2]; *ap++ += temp[4]; *ap += temp[5];// diagonal element } // self-part for (i=0; i<np; i++)// adding some term to diagonal { t = 1./rad[i] - (6. 
- 40./3.*xi*xi*rad[i]*rad[i])*xi/sqrt(M_PI); t *= 0.5; for (j=0; j<3; j++) { ind = 3*i+j; a[ind*np*3+ind] = a[ind*np*3+ind]*0.5+t;// taking care of (i==j) condition } } return 0; } //************************************************************************************************** void get_indices(int index, int *i, int *j, int *k, int b){ int ib, ib2; ib = index%(b); ib2 = index%(b*b); *k = ib; *i = (index-ib2)/(b*b); *j = (ib2-*k)/b; return; } struct box { int head; }; // it is possible to use smaller boxes and more complex neighbor patterns #define NUM_BOX_NEIGHBORS 14 int box_neighbors[NUM_BOX_NEIGHBORS][3] = { {-1,-1,-1}, {-1,-1, 0}, {-1,-1,+1}, {-1, 0,-1}, {-1, 0, 0}, {-1, 0,+1}, {-1,+1,-1}, {-1,+1, 0}, {-1,+1,+1}, { 0,-1,-1}, { 0,-1, 0}, { 0,-1,+1}, { 0, 0,-1}, { 0, 0, 0} // will calculate within the box interactions }; /* // CHECK RPY************* int gold_read(const char *filename, int npos, double *gold) { int npos_read; FILE *fp = fopen(filename, "r"); assert(fp); fscanf(fp, "%d\n", &npos_read); char label[100]; fgets(label, 100, fp); assert(npos == npos_read); for (int i=0; i<3*npos; i++) { for (int j=0; j<3*npos; j++) { fscanf(fp, "%lf\n", &gold[i*(3*npos) + j]); } } fclose(fp); return 0; } double compare_gold(int npos, double *a,double *gold) { double err = 0.0; printf("a = %lf\n", a[3]); printf("gold = %lf\n", gold[3]); for (int i=0; i<npos; i++) { for (int j=0; j<npos; j++) { double diff = a[i*(npos*3) + j] - gold[i*(npos*3) +j]; err += diff*diff; // if(err>0){printf("error at position: i=%d j=%d and err = %lf\n", i, j, err);} // printf("error at position: i=%d j=%d and err = %lf\n", i, j, err); } } return err; } // ********************** */ int bd(int npos, double * restrict pos_orig, double * restrict buf, const int *types, double L, double * restrict pos, int* restrict next, double* restrict forces, double f_const, double * restrict au, double * restrict rad, double xi, int nr, int nk, double * restrict hd_vec) { /* //************************** 
CHECK RPY part *************************************************** printf("npos = %d, L= %lf\n", npos, L); char *gold_filename = "gold.dat"; double *gold = (double *) _mm_malloc((3*npos) * (3*npos) * sizeof(double), 64); if (access(gold_filename, F_OK) == -1) { printf("[WARNING] Unable to access gold file \"%s\"; comparison will not proceed.\n", gold_filename); } else { gold_read(gold_filename, npos, gold); } rpy_ewald(npos, au, pos_orig, L, rad, xi, nr, nk);// DELETE after testing double error = compare_gold(npos, au, gold); printf("Squared Error: %f\n", error); return 500; //********************************************************************************************* */ // Initialisations required for INTERACTION FUNCTION******** NOTE: Can take input to bd itself!!! double krepul = 100, a=1, a_sq, phi=0.2, f; a_sq = a*a; int boxdim;// boxdim is number of cells in L double cutoff2; int numpairs_p; cutoff2 = 4;// cutoff < L/boxdim boxdim =(int)(L/cutoff2)*a;//(int)(L/cutoff2*0.8); printf("L = %lf cutoff2 = %lf boxdim = %d\n", L, cutoff2, boxdim); struct box b[boxdim][boxdim][boxdim]; struct box *bp; struct box *neigh_bp; // box indices int idx, idy, idz, index, box2, ib2; int neigh_idx, neigh_idy, neigh_idz; // allocate implied linked list int p1, p2, j, i; double d2, dx, dy, dz, s; box2 = boxdim*boxdim; //*****************************************END initialisations*********************************** if (boxdim < 4 || cutoff2 > (L/boxdim)*(L/boxdim)) { printf("interactions: bad input parameters\n"); // return 1; } double t0, t_init_cells = 0, t_assign_to_cells=0, t_update_pos=0, t_force=0, t_hd = 0, t_cho = 0; for (int step=0; step<INTERVAL_LEN; step++) { // printf("step = %d\n", step); // Calculation of interaction per time step t0 = time_in_seconds(); // allocate memory for particles in each box // #pragma omp parallel for schedule(static) private(idx, idy, idz, ib2) shared(b, boxdim, box2) // for (index=0; index<boxdim*box2; index++){ // idz = index%(boxdim); // 
ib2 = index%(box2); // idx = (index-ib2)/(box2); // idy = (ib2-idz)/boxdim; // b[idx][idy][idz].head=-1; // } for (idx=0; idx<boxdim; idx++){ for (idy=0; idy<boxdim; idy++){ for (idz=0; idz<boxdim; idz++){ b[idx][idy][idz].head=-1; } } } t_init_cells += time_in_seconds()-t0; t0 = time_in_seconds(); // traverse all particles and assign to boxes // #pragma omp parallel for schedule(static) private(i, idx, idy, idz, bp) shared(b, next) num_threads(NTHREADS) for (i=0; i<npos; i++) { if (pos_orig[3*i] >= 0){pos[3*i]= fmod(pos_orig[3*i], L);}// OR SINCE PARTICLES moving slowly.. change to -L else {// pos_orig[i] is negative pos[3*i] = L-fmod(-1*pos_orig[3*i], L); } if (pos_orig[3*i+1] >= 0){pos[3*i+1]= fmod(pos_orig[3*i+1], L);}// OR SINCE PARTICLES moving slowly.. change to -L else {// pos_orig[i] is negative pos[3*i+1] = L-fmod(-1*pos_orig[3*i+1], L); } if (pos_orig[3*i+2] >= 0){pos[3*i+2]= fmod(pos_orig[3*i+2], L);}// OR SINCE PARTICLES moving slowly.. change to -L else {// pos_orig[i] is negative pos[3*i+2] = L-fmod(-1*pos_orig[3*i+2], L); } if (pos[3*i]<0){printf("pos_orig = %lf pos defect = %lf and i = %d and L =%lf\n", pos_orig[3*i], pos[3*i], i, L);} // initialize entry of implied linked list next[i] = -1; forces[3*i+0] = 0; forces[3*i+1] = 0; forces[3*i+2] = 0; // re-initialising interaction forces at each time step // which box does the particle belong to? 
// assumes particles have positions within [0,L]^3 idx = (int)(pos[3*i ]/L*boxdim); idy = (int)(pos[3*i+1]/L*boxdim); idz = (int)(pos[3*i+2]/L*boxdim); // add to beginning of implied linked list bp = &b[idx][idy][idz]; // next[i] = bp->head; // next = previous (my notation) // #pragma omp critical // { next[i] = bp->head; // next = previous (my notation) bp->head = i; // head = latest (my notation) // } } t_assign_to_cells += time_in_seconds()-t0; t0 = time_in_seconds(); #pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f, idx, idy, idz, ib2, bp) shared(b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, box2) num_threads(NTHREADS) for (index=0; index<boxdim*box2; index++){ idz = index%(boxdim); ib2 = index%(box2); idx = (index-ib2)/(box2); idy = (ib2-idz)/boxdim; bp = &b[idx][idy][idz]; // interactions within and other boxes #pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f) shared(bp, b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, idx, idy, idz)// num_threads(NTHREADS) for (j=0; j<NUM_BOX_NEIGHBORS; j++) { neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim; neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim; neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim; neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz]; // when using boxes, the minimum image computation is // known beforehand, thus we can compute position offsets // to compensate for wraparound when computing distances double xoffset = 0.; double yoffset = 0.; double zoffset = 0.; if (idx + box_neighbors[j][0] == -1) xoffset = -L; if (idy + box_neighbors[j][1] == -1) yoffset = -L; if (idz + box_neighbors[j][2] == -1) zoffset = -L; if (idx + box_neighbors[j][0] == boxdim) xoffset = L; if (idy + box_neighbors[j][1] == boxdim) yoffset = L; if (idz + box_neighbors[j][2] == boxdim) zoffset = L; // NOTE: modifying the 
function to update the forces p1 = neigh_bp->head; while (p1 != -1) { p2 = bp->head; while (p2 != -1) { // compute distance vector dx = pos[3*p1+0] - pos[3*p2+0] + xoffset; dy = pos[3*p1+1] - pos[3*p2+1] + yoffset; dz = pos[3*p1+2] - pos[3*p2+2] + zoffset; d2 = dx*dx+dy*dy+dz*dz+my_EPS; if ( d2<4.0*a_sq) { s = sqrt(d2); f = krepul*(2*a-s); #pragma omp atomic forces[3*p1+0] += f*dx/s; #pragma omp atomic forces[3*p1+1] += f*dy/s; #pragma omp atomic forces[3*p1+2] += f*dz/s; #pragma omp atomic forces[3*p2+0] -= f*dx/s; #pragma omp atomic forces[3*p2+1] -= f*dy/s; #pragma omp atomic forces[3*p2+2] -= f*dz/s; } p2 = next[p2]; } p1 = next[p1]; } } } t_force += time_in_seconds() - t0; t0 = time_in_seconds(); // printf("Calculating the Hydrodynamic Interations for the given particle positions\n"); // au = upper triangular matrix with hydrodynamic interaction values // pos = wrapped up position inside the box_width = L; // rad = radius of particles; xi, nr, nk are constants. for (int p1=0; p1<3*npos*3*npos; p1++){ au[p1] = 0; } rpy_ewald(npos, au, pos, L, rad, xi, nr, nk); t_hd += time_in_seconds() - t0; // print_matrix(au, 3*npos); // printf("Getting the cholesky decomposition\n"); t0 = time_in_seconds(); // LAPACKE_dpotrf(LAPACK_ROW_MAJOR, 'U', 3*npos, au, 3*npos); // double* au_c = cho2(au, 3*npos); au = cho2(au, 3*npos); /* double m1[] = {25, 15, -5, 15, 18, 0, -5, 0, 11}; double *c1 = cho2(m1, 3); show_matrix(c1, 3); my_transpose(c1, 3); show_matrix(c1, 3); */ t_cho += time_in_seconds() - t0; // Get interations vector by multiplying l_cols by buf) // print_matrix(au_c, 3*npos); // print_matrix(au, 3*npos); // printf("Multiplying by random gaussian vector \n"); t0 = time_in_seconds(); // generate random values from standard normal distribution // note: this MKL function is sequential but vectorized vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.); cblas_dgemm(CblasRowMajor, CblasTrans, CblasTrans, 3*npos, 1, 3*npos, 1, au, 3*npos, buf, 
3*npos, 0, hd_vec, 1); // print_array(buf, 3*npos); // printf("printing the correlation vector\n"); // print_array(hd_vec, 3*npos); // update positions with Brownian displacements #pragma omp parallel for schedule(static) shared(pos_orig) private(i) num_threads(NTHREADS) for (int i=0; i<3*npos; i++) { // pos_orig[i] += forces[i]*DELTAT+f_const*buf[i]; pos_orig[i] += forces[i]*DELTAT+f_const*hd_vec[i]; } t_update_pos += time_in_seconds() - t0; } printf("--------------------------------------------------------\n"); printf("Time: %f for initiating the cell head \n", t_init_cells); printf("Time: %f for assigning particles to cells \n", t_assign_to_cells); printf("Time: %f for force calculations \n", t_force); printf("Time: %f for hydrodynamic \n", t_hd); printf("Time: %f for cholesky \n", t_cho); printf("Time: %f for pos update \n", t_update_pos); printf("--------------------------------------------------------\n"); return 0; }
bert_layer_mb1_fixed_tokens.h
#ifndef BERT_LAYER_H_ #define BERT_LAYER_H_ #include <new> #include <string> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <assert.h> #include <math.h> #include <mkl.h> #include <omp.h> #include <iostream> #include <immintrin.h> #include "my_types.h" //#include "timer.h" class BertLayer { public: // hiddenSize 768 隐层神经元、隐藏单元数 // intermediateSize 3072 feed-forward/filter size 升维维度 4*hiddensize BertLayer(int maxTokenSize = 128, int hiddenSize = 768, int intermediateSize = 3072) { this->maxTokenSize = maxTokenSize; this->hiddenSize = hiddenSize; this->intermediateSize = intermediateSize; qkvMatMul.Resize(maxTokenSize, hiddenSize*3); resultBuffer1.Resize(maxTokenSize, hiddenSize); resultBuffer2.Resize(maxTokenSize, hiddenSize); intermediateBuffer.Resize(maxTokenSize, intermediateSize); for (int i = 0; i < 12; ++i) { qk_result[i] = (float *)aligned_alloc(64, sizeof(float) * maxTokenSize * maxTokenSize); exp_buffer[i] = (float *)aligned_alloc(64, sizeof(float) * maxTokenSize); } magic_value = (float *)aligned_alloc(64, sizeof(float) * maxTokenSize); #pragma omp parallel { int tid = omp_get_thread_num(); if (tid == 0) { num_threads = omp_get_num_threads(); } } #ifndef __INTEL_COMPILER erf_buffer = new float * [num_threads]; for (int i = 0; i < num_threads; ++i) { erf_buffer[i] = (float *)aligned_alloc(64, sizeof(float) * intermediateSize); } #endif } virtual ~BertLayer() { for (int i = 0; i < 12; ++i) { free(qk_result[i]); free(exp_buffer[i]); qk_result[i] = NULL; exp_buffer[i] = NULL; } free(magic_value); magic_value = NULL; #ifndef __INTEL_COMPILER for (int i = 0; i < num_threads; ++i) { free(erf_buffer[i]); } delete[] erf_buffer; erf_buffer = NULL; #endif } void setWeights(const float *_queryWeight, const float *_queryBias, const float *_keyWeight, const float *_keyBias, const float *_valueWeight, const float *_valueBias, const float *_attentionOutputWeight, const float *_attentionOutputBias, const float *_gamma1, const float *_beta1, const float 
*_intermediateWeight, const float *_intermediateBias, const float *_outputWeight, const float *_outputBias, const float *_gamma2, const float *_beta2) { // Merged weights, dimension is like: 768*(768*3) hpj::Matrix<float> tmp; tmp.Resize(hiddenSize, hiddenSize * 3); copyWeights(tmp, 0, hiddenSize, _queryWeight); copyWeights(tmp, hiddenSize, hiddenSize*2, _keyWeight); copyWeights(tmp, hiddenSize*2, hiddenSize*3, _valueWeight); copyTransposed(qkvWeight, tmp); /* qkvWeight.Resize(hiddenSize, hiddenSize * 3); copyWeights(qkvWeight, 0, hiddenSize, _queryWeight); copyWeights(qkvWeight, hiddenSize, hiddenSize*2, _keyWeight); copyWeights(qkvWeight, hiddenSize*2, hiddenSize*3, _valueWeight); */ // Merged bias qkvBias.Resize(hiddenSize * 3); memcpy(qkvBias.Data(), _queryBias, sizeof(float) * hiddenSize); memcpy(qkvBias.Data() + hiddenSize, _keyBias, sizeof(float) * hiddenSize); memcpy(qkvBias.Data() + hiddenSize*2, _valueBias, sizeof(float) * hiddenSize); // Weights for attention output attentionOutputWeight.Resize(hiddenSize, hiddenSize); copyWeights(attentionOutputWeight, _attentionOutputWeight); attentionOutputBias.Resize(hiddenSize); memcpy(attentionOutputBias.Data(), _attentionOutputBias, sizeof(float) * hiddenSize); // gamma and beta for batchnorm after self attention gamma1.Resize(hiddenSize); beta1.Resize(hiddenSize); memcpy(gamma1.Data(), _gamma1, sizeof(float) * hiddenSize); memcpy(beta1.Data(), _beta1, sizeof(float) * hiddenSize); // intermediate weight and bias intermediateWeight.Resize(hiddenSize, intermediateSize); copyWeights(intermediateWeight, _intermediateWeight); intermediateBias.Resize(intermediateSize); memcpy(intermediateBias.Data(), _intermediateBias, sizeof(float) * intermediateSize); // output dense weight and bias outputWeight.Resize(intermediateSize, hiddenSize); copyWeights(outputWeight, _outputWeight); outputBias.Resize(hiddenSize); memcpy(outputBias.Data(), _outputBias, sizeof(float) * hiddenSize); // gamma and beta for the last batchnorm 
gamma2.Resize(hiddenSize); beta2.Resize(hiddenSize); memcpy(gamma2.Data(), _gamma2, sizeof(float) * hiddenSize); memcpy(beta2.Data(), _beta2, sizeof(float) * hiddenSize); } // Do the forward computing for the whole BERT layer // input: maxTokenSize x hidden_size // actualTokens: #tokens = maxTokenSize - padded_tokens hpj::Matrix<float> &forward(hpj::Matrix<float> &inputBuffer, int actualTokens) { // Query, Key, Value computed together sgemm(inputBuffer, qkvWeight, qkvMatMul); biasAdd(qkvMatMul, qkvBias); //dumpMatrix(qkvMatMul); // BatchMatMul hpj::Matrix<float> query(qkvMatMul, 0, qkvMatMul.Rows(), 0, hiddenSize); hpj::Matrix<float> key(qkvMatMul, 0, qkvMatMul.Rows(), hiddenSize, hiddenSize); hpj::Matrix<float> value(qkvMatMul, 0, qkvMatMul.Rows(), hiddenSize*2, hiddenSize); batchMatMul(query, key, qk_result); //printf("qk_result[0]=%f,%f\n", qk_result[0][0], qk_result[0][1]); // Softmax computeSoftmax(actualTokens); //printf("after softmax, qk_result[0]=%f,%f\n", qk_result[0][0], qk_result[0][1]); // BatchMatMul batchMatMul(qk_result, value, resultBuffer1); //printf("batchMatMul:\n"); //dumpMatrix(resultBuffer1); // dense denseWithSum(resultBuffer1, attentionOutputWeight, attentionOutputBias, inputBuffer, resultBuffer2); //printf("denseWithSum:\n"); //dumpMatrix(resultBuffer2); // batchmorm batchnorm(resultBuffer2, gamma1, beta1); //printf("batchnorm:\n"); //dumpMatrix(resultBuffer2); // intermediate intermediate(resultBuffer2, intermediateBuffer); //printf("intermediate:\n"); //dumpMatrix(intermediateBuffer); // dense in output denseWithSum(intermediateBuffer, outputWeight, outputBias, resultBuffer2, resultBuffer1); //dumpMatrix(resultBuffer1); // batchnorm batchnorm(resultBuffer1, gamma2, beta2); //dumpMatrix(resultBuffer1); return resultBuffer1; } private: void copyWeights(hpj::Matrix<float> &w, int start_col, int end_col, const float *data) { hpj::Matrix<float> subW(w, 0, w.Rows(), start_col, end_col - start_col); copyWeights(subW, data); } void 
copyWeights(hpj::Matrix<float> &w, const float *data) { for (int i = 0; i < w.Rows(); ++i) { for (int j = 0; j < w.Cols(); ++j) { w(i, j) = *data++; } } } void copyTransposed(hpj::Matrix<float> &dst, hpj::Matrix<float> &src) { dst.Resize(src.Cols(), src.Rows()); for (int i = 0; i < dst.Rows(); ++i) { for (int j = 0; j < dst.Cols(); ++j) { dst(i, j) = src(j, i); } } } void dumpMatrix(hpj::Matrix<float> &m) { int cols = m.Cols(); for (int i = 0; i < m.Rows(); ++i) { if (m.Cols() < 10) { for (int j = 0; j < m.Cols(); ++j) { std::cout << m(i, j) << " "; } } else { std::cout << m(i, 0) << " " << m(i, 1) << " " << m(i, 2) << " ... " << m(i, cols-3) << " " << m(i, cols-2) << " " << m(i, cols-1); } std::cout << std::endl; } } // C = A * B // bTranspose: B need to be transposed or not void sgemm(hpj::Matrix<float> &A, hpj::Matrix<float> &B, hpj::Matrix<float> &C) { bool bTranspose = (A.Cols() != B.Rows()); int m = A.Rows(); int k = A.Cols(); int n = (bTranspose ? B.Rows() : B.Cols()); float alpha = 1; float beta = 0; cblas_sgemm(CblasRowMajor, CblasNoTrans, (bTranspose ? 
CblasTrans : CblasNoTrans), m, n, k, alpha, A.Data(), A.Stride(), B.Data(), B.Stride(), beta, C.Data(), C.Stride()); } // result = x * weight + bias + input void denseWithSum(hpj::Matrix<float> &x, hpj::Matrix<float> &weight, hpj::Vector<float> &bias, hpj::Matrix<float> &input, hpj::Matrix<float> &result) { assert(input.Rows() == result.Rows()); assert(input.Cols() == result.Cols()); sgemm(x, weight, result); float *pbias = bias.Data(); #pragma omp parallel for for (int i = 0; i < result.Rows(); ++i) { float *presult = result.Row(i); float *pinput = input.Row(i); #pragma omp simd for (int j = 0; j < result.Cols(); ++j) { presult[j] += pinput[j] + pbias[j]; } } } void batchnorm(hpj::Matrix<float> &x, hpj::Vector<float> &gamma, hpj::Vector<float> &beta) { assert(x.Rows() == maxTokenSize); assert(x.Cols() == hiddenSize); float *pgamma = gamma.Data(); float *pbeta = beta.Data(); #pragma omp parallel for for (int i = 0; i < x.Rows(); ++i) { float sum = 0; float *px = x.Row(i); #pragma omp simd for (int j = 0; j < x.Cols(); ++j) { sum += px[j]; } float mean = sum / hiddenSize; sum = 0; #pragma omp simd for (int j = 0; j < x.Cols(); ++j) { float delta = (px[j] - mean); sum += delta * delta; } float tmp = sum / hiddenSize + 9.999999960041972e-13; float rvariance = 1.0f / sqrt(tmp); #pragma omp simd for (int j = 0; j < x.Cols(); ++j) { px[j] = (px[j] - mean) * rvariance * pgamma[j] + pbeta[j]; } } } void intermediate(hpj::Matrix<float> &input, hpj::Matrix<float> &output) { sgemm(input, intermediateWeight, output); float *pbias = intermediateBias.Data(); const float factor = sqrt(0.5f); const float scale = 0.5f / factor; #ifdef __INTEL_COMPILER #pragma omp parallel for for (int i = 0; i < output.Rows(); ++i) { float *pout = output.Row(i); #pragma omp simd for (int j = 0; j < output.Cols(); ++j) { float with_bias = pout[j] + pbias[j]; pout[j] = with_bias * 0.5f * (erf(with_bias * factor) + 1); } } #else #pragma omp parallel for for (int i = 0; i < output.Rows(); ++i) { int 
tid = omp_get_thread_num(); float *pout = output.Row(i); #pragma omp simd for (int j = 0; j < output.Cols(); ++j) { pout[j] = (pout[j] + pbias[j]) * factor; } vsErf(output.Cols(), pout, erf_buffer[tid]); #pragma omp simd for (int j = 0; j < output.Cols(); ++j) { pout[j] = pout[j] * scale * (erf_buffer[tid][j] + 1); } } #endif } // ONLY for dimension 768 // The first BatchMatMul inside self attention void batchMatMul(hpj::Matrix<float> &A, hpj::Matrix<float> &B, float *c_array[12]){ #define GRP_COUNT 1 MKL_INT m[GRP_COUNT] = {maxTokenSize}; MKL_INT k[GRP_COUNT] = {64}; MKL_INT n[GRP_COUNT] = {maxTokenSize}; MKL_INT lda[GRP_COUNT] = {A.Stride()}; MKL_INT ldb[GRP_COUNT] = {B.Stride()}; MKL_INT ldc[GRP_COUNT] = {maxTokenSize}; CBLAS_TRANSPOSE transA[GRP_COUNT] = { CblasNoTrans }; CBLAS_TRANSPOSE transB[GRP_COUNT] = { CblasTrans }; float alpha[GRP_COUNT] = {1.0}; float beta[GRP_COUNT] = {0.0}; const MKL_INT size_per_grp[GRP_COUNT] = {12}; // Total number of multiplications: 12 const float *a_array[12], *b_array[12]; for (int i = 0; i < 12; ++i) { a_array[i] = A.Data() + i * 64; b_array[i] = B.Data() + i * 64; } // Call cblas_sgemm_batch cblas_sgemm_batch ( CblasRowMajor, transA, transB, m, n, k, alpha, a_array, lda, b_array, ldb, beta, c_array, ldc, GRP_COUNT, size_per_grp); } // ONLY for dimension 768 // The second BatchMatMul inside self attention void batchMatMul(float *a_array[12], hpj::Matrix<float> &B, hpj::Matrix<float> &C) { #define GRP_COUNT 1 MKL_INT m[GRP_COUNT] = {maxTokenSize}; MKL_INT k[GRP_COUNT] = {maxTokenSize}; MKL_INT n[GRP_COUNT] = {64}; MKL_INT lda[GRP_COUNT] = {maxTokenSize}; MKL_INT ldb[GRP_COUNT] = {B.Stride()}; MKL_INT ldc[GRP_COUNT] = {C.Stride()}; CBLAS_TRANSPOSE transA[GRP_COUNT] = { CblasNoTrans }; CBLAS_TRANSPOSE transB[GRP_COUNT] = { CblasNoTrans }; float alpha[GRP_COUNT] = {1.0}; float beta[GRP_COUNT] = {0.0}; const MKL_INT size_per_grp[GRP_COUNT] = {12}; // Total number of multiplications: 12 float *b_array[12], *c_array[12]; for (int i 
= 0; i < 12; ++i) { b_array[i] = B.Data() + i * 64; c_array[i] = C.Data() + i * 64; } // Call cblas_sgemm_batch cblas_sgemm_batch ( CblasRowMajor, transA, transB, m, n, k, alpha, (const float **)a_array, lda, (const float **)b_array, ldb, beta, c_array, ldc, GRP_COUNT, size_per_grp); } // Add bias to matrix void biasAdd(hpj::Matrix<float> &m, hpj::Vector<float> &bias) { float *pbias = bias.Data(); #pragma omp parallel for for (int i = 0; i < m.Rows(); ++i) { float *p = m.Row(i); #pragma omp simd for (int j = 0; j < m.Cols(); ++j) { p[j] += pbias[j]; } } } // input and output are both in qk_result void computeSoftmax(int actualTokens) { for (int i = 0; i < actualTokens; ++i) { magic_value[i] = 0; } for (int i = actualTokens; i < maxTokenSize; ++i) { magic_value[i] = -10000; } #pragma omp parallel for for (int i = 0; i < 12; ++i) { float *pbuffer = exp_buffer[i]; for (int row = 0; row < maxTokenSize; ++row) { float sum = 0; // max_val is used to avoid exp(x) = inf float max_val = std::numeric_limits<float>::min(); #pragma omp simd for (int j = 0; j < actualTokens; ++j) { if (qk_result[i][row*maxTokenSize+j] > max_val) { max_val = qk_result[i][row*maxTokenSize+j]; } } max_val *= 0.125f; #ifdef __INTEL_COMPILER #pragma omp simd for (int j = 0; j < maxTokenSize; ++j) { pbuffer[j] = exp(qk_result[i][row*maxTokenSize+j] * 0.125f + magic_value[j] - max_val); sum += pbuffer[j]; } #else #pragma omp simd for (int j = 0; j < maxTokenSize; ++j) { pbuffer[j] = qk_result[i][row*maxTokenSize+j] * 0.125f + magic_value[j] - max_val; } vsExp(maxTokenSize, pbuffer, pbuffer); for (int j = 0; j < maxTokenSize; ++j) { sum += pbuffer[j]; } #endif float r_sum = 1.0f / sum; #pragma omp simd for (int j = 0; j < maxTokenSize; ++j) { qk_result[i][row*maxTokenSize+j] = pbuffer[j] * r_sum; } } } } private: int maxTokenSize; int hiddenSize; int intermediateSize; // Store the result of input*qkvWeight hpj::Matrix<float> qkvMatMul; // Buffer like the dimesion of 128x768 hpj::Matrix<float> 
resultBuffer1, resultBuffer2; // Buffer to store the result of intermediate hpj::Matrix<float> intermediateBuffer; // Store the BatchMatMul result of query and key float *qk_result[12]; // Store the result of exp for each line float *exp_buffer[12]; // Magic value: 0 or -10000 float *magic_value; int num_threads; #ifndef __INTEL_COMPILER float **erf_buffer; #endif // Merged query, key, value weighs hpj::Matrix<float> qkvWeight; // Merged query, key, value bias hpj::Vector<float> qkvBias; hpj::Matrix<float> attentionOutputWeight; hpj::Vector<float> attentionOutputBias; // batchnorm param hpj::Vector<float> gamma1, beta1; hpj::Vector<float> gamma2, beta2; hpj::Matrix<float> intermediateWeight; hpj::Vector<float> intermediateBias; hpj::Matrix<float> outputWeight; hpj::Vector<float> outputBias; }; #endif
bicubic_interpolation.c
// This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.

#ifndef BICUBIC_INTERPOLATION_C
#define BICUBIC_INTERPOLATION_C

#include <stdbool.h>

#define BOUNDARY_CONDITION 0 //0 Neumann //1 Periodic //2 Symmetric

/**
 *
 * Neumann boundary condition test
 * (clamp to the valid range; *out is set when clamping happened)
 *
 **/
static int neumann_bc(int x, int nx, bool *out)
{
    if (x < 0) {
        x = 0;
        *out = true;
    }
    else if (x >= nx) {
        x = nx - 1;
        *out = true;
    }
    return x;
}

/**
 *
 * Periodic boundary condition test
 * (wrap around; *out is set when wrapping happened)
 *
 **/
static int periodic_bc(int x, int nx, bool *out)
{
    if (x < 0) {
        const int n = 1 - (int)(x / (nx + 1));
        const int ixx = x + n * nx;
        x = ixx % nx;
        *out = true;
    }
    else if (x >= nx) {
        x = x % nx;
        *out = true;
    }
    return x;
}

/**
 *
 * Symmetric boundary condition test
 * (mirror at the borders; *out is set when mirroring happened)
 *
 **/
static int symmetric_bc(int x, int nx, bool *out)
{
    if (x < 0) {
        const int borde = nx - 1;
        const int xx = -x;
        const int n = (int)(xx / borde) % 2;

        if (n)
            x = borde - (xx % borde);
        else
            x = xx % borde;
        *out = true;
    }
    else if (x >= nx) {
        const int borde = nx - 1;
        const int n = (int)(x / borde) % 2;

        if (n)
            x = borde - (x % borde);
        else
            x = x % borde;
        *out = true;
    }
    return x;
}

/**
 *
 * Cubic interpolation in one dimension
 * (Catmull-Rom form; exact at x = 0 and x = 1)
 *
 **/
static double cubic_interpolation_cell(
    double v[4],  //interpolation points
    double x      //point to be interpolated
)
{
    return v[1] + 0.5 * x * (v[2] - v[0] +
                             x * (2.0 * v[0] - 5.0 * v[1] + 4.0 * v[2] - v[3] +
                                  x * (3.0 * (v[1] - v[2]) + v[3] - v[0])));
}

/**
 *
 * Bicubic interpolation in two dimensions
 * (separable: cubic along y for each row of p, then cubic along x)
 *
 **/
static double bicubic_interpolation_cell(
    double p[4][4],  //array containing the interpolation points
    double x,        //x position to be interpolated
    double y         //y position to be interpolated
)
{
    double v[4];
    v[0] = cubic_interpolation_cell(p[0], y);
    v[1] = cubic_interpolation_cell(p[1], y);
    v[2] = cubic_interpolation_cell(p[2], y);
    v[3] = cubic_interpolation_cell(p[3], y);
    return cubic_interpolation_cell(v, x);
}

/**
 *
 * Compute the bicubic interpolation of a point in an image.
 * Detect if the point goes outside the image domain.
 *
 **/
float bicubic_interpolation_at(
    const float *input,  //image to be interpolated
    const float uu,      //x component of the vector field
    const float vv,      //y component of the vector field
    const int nx,        //image width
    const int ny,        //image height
    bool border_out      //if true, return zero outside the region
)
{
    const int sx = (uu < 0) ? -1 : 1;
    const int sy = (vv < 0) ? -1 : 1;

    bool out[1] = {false};

    // Select the boundary handler once, instead of duplicating the eight
    // clamping calls per boundary mode as the original switch did.
    int (*bc)(int, int, bool *);
    switch (BOUNDARY_CONDITION) {
        case 1:  bc = periodic_bc;  break;
        case 2:  bc = symmetric_bc; break;
        default: bc = neumann_bc;   break;
    }

    //apply the corresponding boundary conditions
    const int x   = bc((int) uu,        nx, out);
    const int y   = bc((int) vv,        ny, out);
    const int mx  = bc((int) uu - sx,   nx, out);
    // FIX: the original computed my with "- sx" (the x-axis sign) in every
    // branch; the y neighbor must step by sy, otherwise results are wrong
    // whenever uu and vv have opposite signs.
    const int my  = bc((int) vv - sy,   ny, out);
    const int dx  = bc((int) uu + sx,   nx, out);
    const int dy  = bc((int) vv + sy,   ny, out);
    const int ddx = bc((int) uu + 2*sx, nx, out);
    const int ddy = bc((int) vv + 2*sy, ny, out);

    if (*out && border_out)
        return 0.0;
    else {
        //obtain the interpolation points of the image
        const float p11 = input[mx  + nx * my];
        const float p12 = input[x   + nx * my];
        const float p13 = input[dx  + nx * my];
        const float p14 = input[ddx + nx * my];

        const float p21 = input[mx  + nx * y];
        const float p22 = input[x   + nx * y];
        const float p23 = input[dx  + nx * y];
        const float p24 = input[ddx + nx * y];

        const float p31 = input[mx  + nx * dy];
        const float p32 = input[x   + nx * dy];
        const float p33 = input[dx  + nx * dy];
        const float p34 = input[ddx + nx * dy];

        const float p41 = input[mx  + nx * ddy];
        const float p42 = input[x   + nx * ddy];
        const float p43 = input[dx  + nx * ddy];
        const float p44 = input[ddx + nx * ddy];

        //create array
        double pol[4][4] = {
            {p11, p21, p31, p41},
            {p12, p22, p32, p42},
            {p13, p23, p33, p43},
            {p14, p24, p34, p44}
        };

        //return interpolation
        return bicubic_interpolation_cell(pol, uu - x, vv - y);
    }
}

/**
 *
 * Compute the bicubic interpolation of an image warped by a vector field.
 *
 **/
void bicubic_interpolation_warp(
    const float *input,  // image to be warped
    const float *u,      // x component of the vector field
    const float *v,      // y component of the vector field
    float *output,       // image warped with bicubic interpolation
    const int nx,        // image width
    const int ny,        // image height
    bool border_out      // if true, put zeros outside the region
)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < ny; i++)
        for (int j = 0; j < nx; j++) {
            const int p = i * nx + j;
            const float uu = (float) (j + u[p]);
            const float vv = (float) (i + v[p]);

            // obtain the bicubic interpolation at position (uu, vv)
            output[p] = bicubic_interpolation_at(input, uu, vv, nx, ny, border_out);
        }
}

#endif//BICUBIC_INTERPOLATION_C
t009.c
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <omp.h>

#define n_site 4
#define n_mu 4
#define n_color 3

typedef float complex T_el;

// 3x3 color matrix of single-precision complex values
typedef struct {T_el cmat[n_color][n_color];} T_mu;
// one lattice site: n_mu direction matrices
typedef struct {T_mu mu[n_mu];} T_site;
typedef T_site T_field[n_site];

// Offload smoke test: initialize a small lattice field on the host, add a
// constant on the device via "omp target", and verify the result came back.
int main(int argc, char **argv)
{
    const T_el zero = 0.0f + 0.0f*I;
    const T_el val = 1.0f + 1.0f*I;
    T_field f;

    // FIX: assign "=" instead of "+=".  The original "+= zero" read the
    // uninitialized elements of f (undefined behavior) and left garbage in
    // the field, making the final comparison against val unreliable.
    for (size_t s = 0; s < n_site; ++s) {
        for (size_t m = 0; m < n_mu; ++m) {
            for (size_t a = 0; a < n_color; ++a) {
                for (size_t b = 0; b < n_color; ++b) {
                    f[s].mu[m].cmat[a][b] = zero;
                }
            }
        }
    }

    // Add val to every element on the target device (host fallback when no
    // offload is available); the map clause copies f both ways.
    #pragma omp target teams distribute parallel for map(tofrom:f[0:n_site])
    for (size_t s = 0; s < n_site; ++s) {
        for (size_t m = 0; m < n_mu; ++m) {
            for (size_t a = 0; a < n_color; ++a) {
                for (size_t b = 0; b < n_color; ++b) {
                    f[s].mu[m].cmat[a][b] += val;
                }
            }
        }
    }

    printf("%.7g + %.7gi\n",
           creal(f[0].mu[0].cmat[0][0]), cimag(f[0].mu[0].cmat[0][0]));

    // Exit nonzero if the device result did not round-trip.
    int ret = 0;
    if (f[0].mu[0].cmat[0][0] != val) ret = 1;
    return ret;
}
GB_unaryop__identity_uint8_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint8_int32
// op(A') function:  GB_tran__identity_uint8_int32

// C type:   uint8_t
// A type:   int32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// A's entry type, used by the GB_GETA macro below
#define GB_ATYPE \
    int32_t

// C's entry type, used by GB_CX below
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

// access entry p of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (int32_t -> uint8_t, C truncation semantics)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;                     \
    /* Cx [pC] = op (cast (aij)) */             \
    GB_CASTING (x, aij) ;                       \
    GB_OP (GB_CX (pC), x) ;                     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx:       output array, already allocated with anz entries
// Ax:       input array of anz entries
// anz:      number of entries in Ax/Cx
// nthreads: OpenMP thread count for the parallel loop
GrB_Info GB_unop__identity_uint8_int32
(
    uint8_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which is
// textually included here with the macros above defining the type-specific
// parts.  Rowcounts/Iter/A_slice/naslice describe the slicing of A across
// naslice parallel tasks (see the template for their exact use).
GrB_Info GB_tran__identity_uint8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RadixSortTypes.c
#include "RadixSortTypes.h"

/*
 * LSD (least-significant-digit) radix sorts for the fixed-width integer
 * and floating point types.  Each function:
 *   1. builds digit histograms / prefix sums via histogram16_t /
 *      histogram32_t / histogram64_t (declared in RadixSortTypes.h),
 *   2. performs one stable counting-sort pass per digit (_x1.._x6 extract
 *      the digits), ping-ponging between the caller's array and a
 *      temporary buffer,
 *   3. uses a bias/key function (pos_* / forward* / back*) so that signed
 *      and floating point values sort in numeric order.
 * On allocation failure the input array is left untouched and the function
 * returns without sorting.  free(NULL) is a no-op, so cleanup on the
 * failure path is unconditional (the previous `if (p) free(p)` guards
 * were redundant).  Allocation sizes cast `size` to size_t to avoid
 * int overflow in the multiplication.
 */

void radixSortShort(short *array, int size) {
    uint32_t *histA = calloc(NUM_THREADS << 16, sizeof(uint32_t));
    uint16_t *temp = malloc((size_t) size * sizeof(uint16_t));
    if (!histA || !temp) {
        free(histA);
        free(temp);
        return;
    }
    uint16_t *input = (uint16_t *) array;
    histogram16_t(input, histA, size, pos_int16_t);
    /* single stable pass over the full 16-bit (biased) key, walked backwards */
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[pos_int16_t(input[i])]] = input[i];
#pragma omp parallel for
    for (int i = 0; i < size; i++)
        input[i] = temp[i];
    free(histA);
    free(temp);
}

void radixSortuint16_t(uint16_t *array, int size) {
    uint32_t *histA = calloc(NUM_THREADS << 16, sizeof(uint32_t));
    uint16_t *temp = malloc((size_t) size * sizeof(uint16_t));
    if (!histA || !temp) {
        free(histA);
        free(temp);
        return;
    }
    histogram16_t(array, histA, size, pos_uint16_t);
    /* unsigned values are their own key: no bias applied in the pass */
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[array[i]]] = array[i];
#pragma omp parallel for
    for (int i = 0; i < size; i++)
        array[i] = temp[i];
    free(histA);
    free(temp);
}

void radixSortInt(int *array, int size) {
    uint32_t *histA = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histB = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histC = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *temp = malloc((size_t) size * sizeof(uint32_t));
    uint32_t *input = (uint32_t *) array;
    if (!histA || !histB || !histC || !temp) {
        free(histA);
        free(histB);
        free(histC);
        free(temp);
        return;
    }
    histogram32_t(input, histA, histB, histC, size, pos_int32_t);
    /* three stable digit passes, then the sign-bias map on copy-back */
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[_x1(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histB[_x2(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histC[_x3(input[i])]] = input[i];
#pragma omp parallel for
    for (int i = 0; i < size; i++)
        input[i] = pos_int32_t(temp[i]);
    free(histA);
    free(histB);
    free(histC);
    free(temp);
}

void radixSortuint32_t(uint32_t *array, int size) {
    uint32_t *histA = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histB = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histC = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *temp = malloc((size_t) size * sizeof(uint32_t));
    if (!histA || !histB || !histC || !temp) {
        free(histA);
        free(histB);
        free(histC);
        free(temp);
        return;
    }
    histogram32_t(array, histA, histB, histC, size, pos_uint32_t);
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[_x1(array[i])]] = array[i];
    for (int i = size - 1; i >= 0; --i)
        array[--histB[_x2(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histC[_x3(array[i])]] = array[i];
#pragma omp parallel for
    for (int i = 0; i < size; i++)
        array[i] = temp[i];
    free(histA);
    free(histB);
    free(histC);
    free(temp);
}

void radixSortFloat(float *array, int size) {
    uint32_t *histA = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histB = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histC = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *temp = malloc((size_t) size * sizeof(uint32_t));
    uint32_t *input = (uint32_t *) array;
    if (!histA || !histB || !histC || !temp) {
        free(histA);
        free(histB);
        free(histC);
        free(temp);
        return;
    }
    histogram32_t(input, histA, histB, histC, size, forwardf32_t);
    /* forwardf32_t/backf32_t map IEEE-754 bit patterns to/from a
       monotonically increasing unsigned key */
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[_x1(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histB[_x2(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histC[_x3(input[i])]] = input[i];
#pragma omp parallel for
    for (int i = 0; i < size; i++)
        input[i] = backf32_t(temp[i]);
    free(histA);
    free(histB);
    free(histC);
    free(temp);
}

void radixSortDouble(double *array, int size) {
    uint32_t *histA = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histB = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histC = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histD = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histE = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histF = calloc(HIST_FULL, sizeof(uint32_t));
    uint64_t *temp = malloc((size_t) size * sizeof(uint64_t));
    uint64_t *input = (uint64_t *) array;
    if (!histA || !histB || !histC || !histD || !histE || !histF || !temp) {
        free(histA);
        free(histB);
        free(histC);
        free(histD);
        free(histE);
        free(histF);
        free(temp);
        return;
    }
    histogram64_t(input, histA, histB, histC, histD, histE, histF, size, forwardf64_t);
    /* six stable digit passes; the last pass also applies the inverse
       float-key map, so no separate copy-back loop is needed */
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[_x1(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histB[_x2(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histC[_x3(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histD[_x4(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histE[_x5(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histF[_x6(temp[i])]] = backf64_t(temp[i]);
    free(histA);
    free(histB);
    free(histC);
    free(histD);
    free(histE);
    free(histF);
    free(temp);
}

void radixSortint64_t(int64_t *array, int size) {
    uint32_t *histA = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histB = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histC = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histD = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histE = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histF = calloc(HIST_FULL, sizeof(uint32_t));
    uint64_t *temp = malloc((size_t) size * sizeof(uint64_t));
    uint64_t *input = (uint64_t *) array;
    if (!histA || !histB || !histC || !histD || !histE || !histF || !temp) {
        free(histA);
        free(histB);
        free(histC);
        free(histD);
        free(histE);
        free(histF);
        free(temp);
        return;
    }
    histogram64_t(input, histA, histB, histC, histD, histE, histF, size, pos_int64_t);
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[_x1(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histB[_x2(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histC[_x3(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histD[_x4(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histE[_x5(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histF[_x6(temp[i])]] = pos_int64_t(temp[i]);
    free(histA);
    free(histB);
    free(histC);
    free(histD);
    free(histE);
    free(histF);
    free(temp);
}

void radixSortuint64_t(uint64_t *input, int size) {
    uint32_t *histA = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histB = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histC = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histD = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histE = calloc(HIST_FULL, sizeof(uint32_t));
    uint32_t *histF = calloc(HIST_FULL, sizeof(uint32_t));
    uint64_t *temp = malloc((size_t) size * sizeof(uint64_t));
    if (!histA || !histB || !histC || !histD || !histE || !histF || !temp) {
        free(histA);
        free(histB);
        free(histC);
        free(histD);
        free(histE);
        free(histF);
        free(temp);
        return;
    }
    histogram64_t(input, histA, histB, histC, histD, histE, histF, size, pos_uint64_t);
    for (int i = size - 1; i >= 0; --i)
        temp[--histA[_x1(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histB[_x2(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histC[_x3(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histD[_x4(temp[i])]] = temp[i];
    for (int i = size - 1; i >= 0; --i)
        temp[--histE[_x5(input[i])]] = input[i];
    for (int i = size - 1; i >= 0; --i)
        input[--histF[_x6(temp[i])]] = temp[i];
    free(histA);
    free(histB);
    free(histC);
    free(histD);
    free(histE);
    free(histF);
    free(temp);
}
/* ==== main.c ==== */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>  /* for sleep(); it was previously called with no declaration */
#include <omp.h>

#define N 1000       /* currently unused; kept for compatibility */
#define CHUNK 10

/*
 * Demonstrates OpenMP dynamic scheduling: 16 iterations of increasing
 * duration (sleep(i)) are handed out in chunks of CHUNK to 5 threads,
 * so fast threads pick up additional chunks as they finish.
 * Returns 0 on success.
 */
int main() {
    int i;
#pragma omp parallel for private(i) num_threads(5) schedule(dynamic, CHUNK)
    for (i = 0; i < 16; i++) {
        sleep(i);
        printf("Il thread %d ha completato iterazione %d.\n", omp_get_thread_num() , i);
    }
    printf("Tutti i thread hanno terminato! \n");
    return 0;
}
/* ==== SwathFileConsumer.h ==== */
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2016. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------

#ifndef OPENMS_FORMAT_DATAACCESS_SWATHFILECONSUMER_H
#define OPENMS_FORMAT_DATAACCESS_SWATHFILECONSUMER_H

#include <boost/cast.hpp>

// Datastructures
#include <OpenMS/ANALYSIS/OPENSWATH/OPENSWATHALGO/DATAACCESS/DataStructures.h>
#include <OpenMS/ANALYSIS/OPENSWATH/OPENSWATHALGO/DATAACCESS/SwathMap.h>

// Consumers
#include <OpenMS/FORMAT/DATAACCESS/MSDataCachedConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataTransformingConsumer.h>

// Helpers
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>

#include <OpenMS/INTERFACES/IMSDataConsumer.h>
#include <OpenMS/FORMAT/CachedMzML.h>

// NOTE(review): MzMLFile is used in CachedSwathFileConsumer::ensureMapsAreFilled_
// but <OpenMS/FORMAT/MzMLFile.h> is not included here -- presumably pulled in
// transitively; verify.

#ifdef _OPENMP
#include <omp.h>
#endif

namespace OpenMS
{

  /**
   * @brief Abstract base class which can consume spectra coming from SWATH experiment stored in a single file.
   *
   * The class consumes spectra which are coming from a complete SWATH
   * experiment. It will group MS2 spectra by their precursor m/z, assuming
   * that they correspond to the same SWATH window. For example, the spectra
   * could be arranged in the following fashion:
   *
   * - MS1 Spectrum (no precursor)
   * - MS2 Spectrum (precursor = [400,425])
   * - MS2 Spectrum (precursor = [425,450])
   * - [...]
   * - MS2 Spectrum (precursor = [1175,1200])
   * - MS1 Spectrum (no precursor)
   * - MS2 Spectrum (precursor = [400,425])
   * - MS2 Spectrum (precursor = [425,450])
   * - [...]
   *
   * Base classes are expected to implement functions consuming a spectrum coming
   * from a specific SWATH or an MS1 spectrum and a final function
   * ensureMapsAreFilled_ after which the swath_maps_ vector needs to contain
   * valid pointers to MSExperiment.
   *
   * In addition it is possible to provide the swath boundaries and the read in
   * spectra will be matched by their precursor m/z to the "center" attribute
   * of the provided Swath maps.
   *
   * Usage:
   *
   * @code
   * FullSwathFileConsumer * dataConsumer;
   * // assign dataConsumer to an implementation of FullSwathFileConsumer
   * MzMLFile().transform(file, dataConsumer);
   * dataConsumer->retrieveSwathMaps(maps);
   * @endcode
   *
   */
  class OPENMS_DLLAPI FullSwathFileConsumer :
    public Interfaces::IMSDataConsumer<>
  {
public:
    typedef MSExperiment<> MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    // Default constructor: no externally provided window boundaries, so
    // windows are inferred from the precursor m/z of incoming MS2 spectra.
    FullSwathFileConsumer() :
      ms1_map_(), // initialize to null
      consuming_possible_(true),
      use_external_boundaries_(false),
      correct_window_counter_(0)
    {
      use_external_boundaries_ = !swath_map_boundaries_.empty();
    }

    /**
     * @brief Constructor
     *
     * @param swath_boundaries A vector of SwathMaps of which only the center,
     * lower and upper attributes will be used to infer the expected Swath maps.
     *
     */
    FullSwathFileConsumer(std::vector<OpenSwath::SwathMap> swath_boundaries) :
      swath_map_boundaries_(swath_boundaries),
      ms1_map_(), // initialize to null
      consuming_possible_(true),
      use_external_boundaries_(false),
      correct_window_counter_(0)
    {
      use_external_boundaries_ = !swath_map_boundaries_.empty();
    }

    ~FullSwathFileConsumer() {}

    void setExpectedSize(Size, Size) {}
    void setExperimentalSettings(const ExperimentalSettings& exp) {settings_ = exp; }

    /**
     * @brief Populate the vector of swath maps after consuming all spectra.
     *
     * Will populate the input vector with SwathMap objects which correspond to
     * the MS1 map (if present) and the MS2 maps (SWATH maps). This should be
     * called after all spectra are consumed.
     *
     * @note It is not possible to consume any more spectra after calling this
     * function (it contains finalization code and may close file streams).
     *
     */
    void retrieveSwathMaps(std::vector<OpenSwath::SwathMap>& maps)
    {
      consuming_possible_ = false; // make consumption of further spectra / chromatograms impossible
      ensureMapsAreFilled_();
      if (ms1_map_)
      {
        OpenSwath::SwathMap map;
        map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(ms1_map_);
        // MS1 map carries no isolation window; sentinel values of -1
        map.lower = -1;
        map.upper = -1;
        map.center = -1;
        map.ms1 = true;
        maps.push_back(map);
      }

      // Print warning if the lower/upper window could not be determined and we
      // required manual determination of the boundaries.
      if (!use_external_boundaries_ && correct_window_counter_ != swath_maps_.size())
      {
        std::cout << "WARNING: Could not correctly read the upper/lower limits of the SWATH windows from your input file. Read " <<
          correct_window_counter_ << " correct (non-zero) window limits (expected " <<
          swath_maps_.size() << " windows)." << std::endl;
      }

      size_t nonempty_maps = 0;
      for (Size i = 0; i < swath_maps_.size(); i++)
      {
        OpenSwath::SwathMap map;
        map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(swath_maps_[i]);
        map.lower = swath_map_boundaries_[i].lower;
        map.upper = swath_map_boundaries_[i].upper;
        map.center = swath_map_boundaries_[i].center;
        map.ms1 = false;
        maps.push_back(map);
        if (map.sptr->getNrSpectra() > 0) {nonempty_maps++;}
      }

      if (nonempty_maps != swath_map_boundaries_.size())
      {
        std::cout << "WARNING: The number nonempty maps found in the input file (" <<
          nonempty_maps << ") is not equal to the number of provided swath window boundaries (" <<
          swath_map_boundaries_.size() << "). Please check your input." << std::endl;
      }
    }

    /// Consume a chromatogram -> should not happen when dealing with SWATH maps
    void consumeChromatogram(MapType::ChromatogramType&)
    {
      std::cerr << "Read chromatogram while reading SWATH files, did not expect that!" << std::endl;
    }

    /**
     * @brief * Consume a spectrum which may belong either to an MS1 scan or
     * one of n MS2 (SWATH) scans
     *
     */
    void consumeSpectrum(MapType::SpectrumType& s)
    {
      if (!consuming_possible_)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__,
          "FullSwathFileConsumer cannot consume any more spectra after retrieveSwathMaps has been called already");
      }

      if (s.getMSLevel() == 1)
      {
        consumeMS1Spectrum_(s);
      }
      else
      {
        if (s.getPrecursors().empty())
        {
          throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__,
            "Swath scan does not provide a precursor.");
        }
        // Only the first precursor of the scan determines the window
        const std::vector<Precursor> prec = s.getPrecursors();
        double center = prec[0].getMZ();
        double lower = prec[0].getMZ() - prec[0].getIsolationWindowLowerOffset();
        double upper = prec[0].getMZ() + prec[0].getIsolationWindowUpperOffset();
        bool found = false;

        // Check if enough information is present to infer the swath
        if (center <= 0.0)
        {
          throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__,
            "Swath scan does not provide any precursor isolation information.");
        }

        // try to match the current scan to one of the already known windows
        for (Size i = 0; i < swath_map_boundaries_.size(); i++)
        {
          // We group by the precursor mz (center of the window) since this
          // should be present in all SWATH scans.
          if (std::fabs(center - swath_map_boundaries_[i].center) < 1e-6)
          {
            found = true;
            consumeSwathSpectrum_(s, i);
          }
        }
        if (!found)
        {
          if (use_external_boundaries_)
          {
            throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__,
              String("Encountered SWATH scan with boundary ") + center + " m/z which was not present in the provided windows.");
          }
          else
          {
            consumeSwathSpectrum_(s, swath_map_boundaries_.size());
            // we found a new SWATH window
            if (lower > 0.0 && upper > 0.0)
            {correct_window_counter_++;}
            OpenSwath::SwathMap boundary;
            boundary.lower = lower;
            boundary.upper = upper;
            boundary.center = center;
            swath_map_boundaries_.push_back(boundary);
            LOG_DEBUG << "Adding Swath centered at " << center
                      << " m/z with an isolation window of " << lower << " to " << upper
                      << " m/z." << std::endl;
          }
        }
      }
    }

protected:

    /**
     * @brief Consume an MS2 spectrum belonging to SWATH "swath_nr"
     *
     * This function should handle a spectrum belonging to a specific SWATH
     * (indicated by swath_nr).
     *
     */
    virtual void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) = 0;

    /**
     * @brief Consume an MS1 spectrum
     *
     * This function should handle an MS1 spectrum.
     *
     */
    virtual void consumeMS1Spectrum_(MapType::SpectrumType& s) = 0;

    /**
     * @brief Callback function after the reading is complete
     *
     * Has to ensure that swath_maps_ and ms1_map_ are correctly populated.
     */
    virtual void ensureMapsAreFilled_() = 0;

    /// A list of Swath map identifiers (lower/upper boundary and center)
    std::vector<OpenSwath::SwathMap> swath_map_boundaries_;

    /// A list of SWATH maps and the MS1 map
    std::vector<boost::shared_ptr<MSExperiment<> > > swath_maps_;
    boost::shared_ptr<MSExperiment<> > ms1_map_;

    /// The Experimental settings
    // (MSExperiment has no constructor using ExperimentalSettings)
    MSExperiment<> settings_;

    /// Whether further spectra can still be consumed
    bool consuming_possible_;

    /// Whether to use external input for SWATH boundaries
    bool use_external_boundaries_;

    /// How many windows were correctly annotated (non-zero window limits)
    size_t correct_window_counter_;
  };

  /**
   * @brief In-memory implementation of FullSwathFileConsumer
   *
   * Keeps all the spectra in memory by just appending them to an MSExperiment.
   *
   */
  class OPENMS_DLLAPI RegularSwathFileConsumer :
    public FullSwathFileConsumer
  {
public:
    typedef MSExperiment<> MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    RegularSwathFileConsumer() {}

    RegularSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries) :
      FullSwathFileConsumer(known_window_boundaries) {}

protected:
    // Append a fresh in-memory experiment (copying the experimental settings)
    void addNewSwathMap_()
    {
      boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_));
      swath_maps_.push_back(exp);
    }

    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr)
    {
      while (swath_maps_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }
      swath_maps_[swath_nr]->addSpectrum(s);
    }

    void addMS1Map_()
    {
      boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_));
      ms1_map_ = exp;
    }

    void consumeMS1Spectrum_(MapType::SpectrumType& s)
    {
      if (!ms1_map_)
      {
        addMS1Map_();
      }
      ms1_map_->addSpectrum(s);
    }

    // Nothing to do: maps were filled incrementally while consuming
    void ensureMapsAreFilled_() {}
  };

  /**
   * @brief On-disk cached implementation of FullSwathFileConsumer
   *
   * Writes all spectra immediately to disk in a user-specified caching
   * location using the MSDataCachedConsumer. Internally, it handles
   * n+1 (n SWATH + 1 MS1 map) objects of MSDataCachedConsumer which can consume the
   * spectra and write them to disk immediately.
   *
   */
  class OPENMS_DLLAPI CachedSwathFileConsumer :
    public FullSwathFileConsumer
  {
public:
    typedef MSExperiment<> MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    CachedSwathFileConsumer(String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
      ms1_consumer_(NULL),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    CachedSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries,
                            String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
      FullSwathFileConsumer(known_window_boundaries),
      ms1_consumer_(NULL),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    ~CachedSwathFileConsumer()
    {
      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != NULL)
      {
        delete ms1_consumer_;
        ms1_consumer_ = NULL;
      }
    }

protected:
    // Create a new on-disk cache file plus an in-memory experiment that
    // collects only the metadata of the spectra written to it
    void addNewSwathMap_()
    {
      String meta_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML";
      String cached_file = meta_file + ".cached";
      MSDataCachedConsumer* consumer = new MSDataCachedConsumer(cached_file, true);
      consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0);
      swath_consumers_.push_back(consumer);

      // maps for meta data
      boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_));
      swath_maps_.push_back(exp);
    }

    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr)
    {
      while (swath_maps_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }
      swath_consumers_[swath_nr]->consumeSpectrum(s);
      swath_maps_[swath_nr]->addSpectrum(s); // append for the metadata (actual data is deleted)
    }

    void addMS1Map_()
    {
      String meta_file = cachedir_ + basename_ + "_ms1.mzML";
      String cached_file = meta_file + ".cached";
      ms1_consumer_ = new MSDataCachedConsumer(cached_file, true);
      ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0);
      boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_));
      ms1_map_ = exp;
    }

    void consumeMS1Spectrum_(MapType::SpectrumType& s)
    {
      if (ms1_consumer_ == NULL)
      {
        addMS1Map_();
      }
      ms1_consumer_->consumeSpectrum(s);
      ms1_map_->addSpectrum(s); // append for the metadata (actual data is deleted)
    }

    void ensureMapsAreFilled_()
    {
      size_t swath_consumers_size = swath_consumers_.size();
      bool have_ms1 = (ms1_consumer_ != NULL);

      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      // The file streams to the cached data on disc can and should be closed
      // here safely. Since ensureMapsAreFilled_ is called after consuming all
      // the spectra, there will be no more spectra to append but the client
      // might already want to read after this call, so all data needs to be
      // present on disc and the file streams closed.
      //
      // TODO merge with destructor code into own function!
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != NULL)
      {
        delete ms1_consumer_;
        ms1_consumer_ = NULL;
      }

      if (have_ms1)
      {
        boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>);
        String meta_file = cachedir_ + basename_ + "_ms1.mzML";
        // write metadata to disk and store the correct data processing tag
        CachedmzML().writeMetadata(*ms1_map_, meta_file, true);
        MzMLFile().load(meta_file, *exp.get());
        ms1_map_ = exp;
      }

      // Each iteration touches an independent file/map, so the loop is safe
      // to parallelize with OpenMP.
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_consumers_size); i++)
      {
        boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>);
        String meta_file = cachedir_ + basename_ + "_" + String(i) + ".mzML";
        // write metadata to disk and store the correct data processing tag
        CachedmzML().writeMetadata(*swath_maps_[i], meta_file, true);
        MzMLFile().load(meta_file, *exp.get());
        swath_maps_[i] = exp;
      }
    }

    MSDataCachedConsumer* ms1_consumer_;
    std::vector<MSDataCachedConsumer*> swath_consumers_;

    String cachedir_;
    String basename_;
    // NOTE(review): constructor takes Size but member is int -- potential
    // narrowing for very large spectrum counts; confirm intended.
    int nr_ms1_spectra_;
    std::vector<int> nr_ms2_spectra_;
  };
}

#endif
/* ==== rom_residuals_utility.h ==== */
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \.
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    RAUL BRAVO
//

#if !defined( ROM_RESIDUALS_UTILITY_H_INCLUDED )
#define ROM_RESIDUALS_UTILITY_H_INCLUDED

/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "spaces/ublas_space.h"

/* Application includes */
#include "rom_application_variables.h"

namespace Kratos
{
    typedef UblasSpace<double, CompressedMatrix, boost::numeric::ublas::vector<double>> SparseSpaceType;
    typedef UblasSpace<double, Matrix, Vector> LocalSpaceType;
    typedef Scheme<SparseSpaceType, LocalSpaceType> BaseSchemeType;

    // This utility returns the converged residuals projected onto the ROM basis Phi.
    class RomResidualsUtility
    {
        public:

        KRATOS_CLASS_POINTER_DEFINITION(RomResidualsUtility);

        // Constructor: validates the input parameters against the defaults,
        // stores the nodal unknown names and the number of ROM dofs, and
        // builds the VARIABLE_KEY --> row-in-basis mapping (MapPhi).
        RomResidualsUtility(
        ModelPart& rModelPart,
        Parameters ThisParameters,
        BaseSchemeType::Pointer pScheme
        ): mpModelPart(rModelPart), mpScheme(pScheme){
        // Validate default parameters
        Parameters default_parameters = Parameters(R"( { "nodal_unknowns" : [], "number_of_rom_dofs" : 10 })" );

        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        mNodalVariablesNames = ThisParameters["nodal_unknowns"].GetStringArray();

        mNodalDofs = mNodalVariablesNames.size();
        mRomDofs = ThisParameters["number_of_rom_dofs"].GetInt();

        // Setting up mapping: VARIABLE_KEY --> CORRECT_ROW_IN_BASIS
        for(int k=0; k<mNodalDofs; k++){
            if(KratosComponents<Variable<double>>::Has(mNodalVariablesNames[k]))
            {
                const auto& var = KratosComponents<Variable<double>>::Get(mNodalVariablesNames[k]);
                MapPhi[var.Key()] = k;
            }
            else
                KRATOS_ERROR << "variable \""<< mNodalVariablesNames[k] << "\" not valid" << std::endl;
        }
        }

        ~RomResidualsUtility()= default;

        // Fills PhiElemental (dofs x ROM dofs) with the nodal ROM basis rows
        // for each dof of the element/condition; fixed dofs get a zero row.
        // The `counter` walk assumes the dof list is grouped by node id in
        // geometry order -- presumably guaranteed by GetDofList; verify.
        void GetPhiElemental(
            Matrix &PhiElemental,
            const Element::DofsVectorType &dofs,
            const Element::GeometryType &geom)
        {
            const auto *pcurrent_rom_nodal_basis = &(geom[0].GetValue(ROM_BASIS));
            int counter = 0;
            for(unsigned int k = 0; k < dofs.size(); ++k){
                auto variable_key = dofs[k]->GetVariable().Key();
                if(k==0)
                    pcurrent_rom_nodal_basis = &(geom[counter].GetValue(ROM_BASIS));
                else if(dofs[k]->Id() != dofs[k-1]->Id()){
                    counter++;
                    pcurrent_rom_nodal_basis = &(geom[counter].GetValue(ROM_BASIS));
                }
                if (dofs[k]->IsFixed())
                    noalias(row(PhiElemental, k)) = ZeroVector(PhiElemental.size2());
                else
                    noalias(row(PhiElemental, k)) = row(*pcurrent_rom_nodal_basis, MapPhi[variable_key]);
            }
        }

        // Assembles, in parallel, one projected residual row (Phi^T * RHS)
        // per element and per condition.  Each OpenMP iteration writes a
        // distinct row of MatrixResiduals, so no synchronization is needed.
        Matrix Calculate()
        {
            // Getting the number of elements and conditions from the model
            const int nelements = static_cast<int>(mpModelPart.Elements().size());
            const int nconditions = static_cast<int>(mpModelPart.Conditions().size());

            const auto& CurrentProcessInfo = mpModelPart.GetProcessInfo();
            const auto el_begin = mpModelPart.ElementsBegin();
            const auto cond_begin = mpModelPart.ConditionsBegin();

            //contributions to the system
            Matrix LHS_Contribution = ZeroMatrix(0, 0);
            Vector RHS_Contribution = ZeroVector(0);

            //vector containing the localization in the system of the different terms
            Element::EquationIdVectorType EquationId;
            Matrix MatrixResiduals( (nelements + nconditions), mRomDofs); // Matrix of reduced residuals.
            Matrix PhiElemental;
            #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId, PhiElemental, el_begin, cond_begin)
            {
                #pragma omp for nowait
                for (int k = 0; k < nelements; k++){
                    auto it_el = el_begin + k;
                    //detect if the element is active or not. If the user did not make any choice the element is active by default
                    bool element_is_active = true;
                    if ((it_el)->IsDefined(ACTIVE))
                        element_is_active = (it_el)->Is(ACTIVE);
                    if (element_is_active){
                        //calculate elemental contribution
                        mpScheme->CalculateSystemContributions(*it_el, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                        Element::DofsVectorType dofs;
                        it_el->GetDofList(dofs, CurrentProcessInfo);
                        //assemble the elemental contribution - here is where the ROM acts
                        //compute the elemental reduction matrix PhiElemental
                        const auto& geom = it_el->GetGeometry();
                        if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mRomDofs)
                            PhiElemental.resize(dofs.size(), mRomDofs,false);
                        GetPhiElemental(PhiElemental, dofs, geom);
                        noalias(row(MatrixResiduals, k)) = prod(trans(PhiElemental), RHS_Contribution); // The size of the residual will vary only when using more ROM modes, one row per condition
                    }

                }

                #pragma omp for nowait
                for (int k = 0; k < nconditions; k++){
                    ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
                    //detect if the condition is active or not. If the user did not make any choice the condition is active by default
                    bool condition_is_active = true;
                    if ((it)->IsDefined(ACTIVE))
                        condition_is_active = (it)->Is(ACTIVE);
                    if (condition_is_active){
                        Condition::DofsVectorType dofs;
                        it->GetDofList(dofs, CurrentProcessInfo);
                        //calculate elemental contribution
                        mpScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                        //assemble the elemental contribution - here is where the ROM acts
                        //compute the elemental reduction matrix PhiElemental
                        const auto& geom = it->GetGeometry();
                        if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mRomDofs)
                            PhiElemental.resize(dofs.size(), mRomDofs,false);
                        GetPhiElemental(PhiElemental, dofs, geom);
                        // conditions occupy the rows after the elements
                        noalias(row(MatrixResiduals, k+nelements)) = prod(trans(PhiElemental), RHS_Contribution); // The size of the residual will vary only when using more ROM modes, one row per condition
                    }
                }
            }
        return MatrixResiduals;
        }

        protected:
            std::vector< std::string > mNodalVariablesNames;   // names of the nodal unknowns
            int mNodalDofs;                                     // number of nodal dofs (rows of each nodal basis)
            unsigned int mRomDofs;                              // number of ROM modes (columns of Phi)
            ModelPart& mpModelPart;
            BaseSchemeType::Pointer mpScheme;
            std::unordered_map<Kratos::VariableData::KeyType,int> MapPhi;  // variable key -> row in nodal basis
        };


} // namespace Kratos

#endif // ROM_RESIDUALS_UTILITY_H_INCLUDED defined
/* ==== draw.c ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 4296.0 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/

/*
  One monotonic polygon edge in scanline-renderable form: a y-sorted run of
  points plus its bounding box and winding direction.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;              /* bounding box of this edge's points */

  double
    scanline;            /* last scanline visited (-1.0 until rendered) */

  PointInfo
    *points;             /* edge vertices, sorted top-to-bottom */

  size_t
    number_points;

  ssize_t
    direction;           /* 1 when the original path ran downward, else 0 */

  MagickBooleanType
    ghostline;           /* transparent closure edge of an open subpath */

  size_t
    highwater;           /* scan resume index; avoids rescanning points */
} EdgeInfo;

/*
  Ellipse/arc element: center, semi-axes, and rotation angle.
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  Mutable state threaded through the MVG/path tracing routines: the growable
  primitive array, its extent, the write offset, and the current point.
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/*
  A path converted to its sorted-edge rendering form.
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/*
  Vector-path opcodes; GhostlineCode marks the transparent edge that closes
  an open subpath, EndCode terminates the path array.
*/
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);

static ssize_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%  AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /*
    AcquireCriticalMemory() aborts on allocation failure, so the result needs
    no NULL check; GetDrawInfo() sets every field to its default.
  */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized structure; a NULL source means the
    caller just wants the defaults.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy every owned pointer (strings, pattern/mask images, dash
    pattern, gradient stops); value fields are copied by assignment.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      ssize_t
        x;

      /*
        The dash pattern is terminated by a (near-)zero entry; count the
        live entries, then over-allocate (2*x+2) zeroed slots and copy the
        pattern plus its terminator.  Assumes the source pattern is properly
        zero-terminated -- an unterminated pattern would over-read here.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /*
          NOTE(review): this reuses the dash-pattern message tag for a
          gradient-stop allocation failure (apparent copy-paste).  Left
          unchanged because exception tags must exist in the locale message
          table -- confirm before introducing a new tag.
        */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=draw_info->debug;
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPathToPolygon() returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o draw_info: Specifies a pointer to an DrawInfo structure.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

/*
  Release a PolygonInfo and every per-edge point array it owns.  Safe to
  call on a partially-constructed structure (NULL edges or points).
  Always returns NULL, so callers can write `return(DestroyPolygonInfo(p))`.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  ssize_t
    i;

  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        if (polygon_info->edges[i].points != (PointInfo *) NULL)
          polygon_info->edges[i].points=(PointInfo *)
            RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: orders edges by first-point y, then x, then slope
  (via cross product), then second-point y and x.  The macro returns from
  the enclosing function on the first decisive comparison.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/*
  Debug helper: dump every edge (direction, ghostline flag, bounds, points)
  to the Draw event log.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  EdgeInfo
    *p;

  ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}

/*
  In-place reversal of a point array; used so every stored edge runs
  top-to-bottom regardless of the original path direction.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.  The path
    is split into monotonic edges: whenever the y-direction of the path
    reverses, the accumulated points are flushed into an EdgeInfo and a new
    edge is started.  Both the edge array and each edge's point array grow
    by doubling and are trimmed to exact size at the end.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /*
    Seed edge 0 with empty values so DestroyPolygonInfo() is safe even if
    no edge is ever completed.
  */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush any in-progress edge, then start a new subpath.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.  Direction is +1 when the path moves down (or rightward on a
      horizontal step), -1 otherwise.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the y-direction reversed, so flush the current run and
          start the next edge at the last point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        points=(PointInfo *) NULL;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /*
        Flush the final edge; a run of fewer than 2 points is degenerate
        and is discarded.
      */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  /*
    Trim the edge array and each edge's point array to exact size, then
    sort the edges for scanline rendering.
  */
  polygon_info->number_edges=edge;
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if ((GetLogEventMask() & DrawEvent) != 0)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPrimitiveToPath() returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/

/*
  Debug helper: dump the vector path (one line per point, with its opcode)
  to the Draw event log.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,          /* first point of the current subpath */
    q;          /* previous emitted point (for duplicate elimination) */

  ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.  Open
    subpaths are closed with a transparent "ghostline" back to their first
    point so the polygon scanner always sees closed contours.
  */
  switch (primitive_info->primitive)
  {
    /*
      Non-path primitives have no vector form.
    */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Worst case each input point expands to 3 path entries (ghostline
    closure), plus the EndCode terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed, and append a
      ghostline back to the subpath start so the contour closes invisibly.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim to the n+1 entries actually written (indices 0..n inclusive).
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Release every owned pointer, invalidate the signature to catch
    use-after-free, then free the structure itself.  Returns NULL so
    callers can write `draw_info=DestroyDrawInfo(draw_info);`.
  */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clip the scanline y of the destination edge against the inverse-mapped
  footprint of the source image; returns the [x1,x2] span that actually
  intersects the transformed source (x2 < x1 means an empty span).
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /*
            Degenerate sx: the whole scanline maps outside the source, so
            collapse the span to empty.
          */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): this degenerate branch sets x2=edge->x2 while the
            columns branch above sets x2=edge->x1 -- confirm both are meant
            to yield an empty span at the caller's x2 < x1 test.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  Invert a 2x3 affine matrix.  PerceptibleReciprocal() guards against a
  (near-)zero determinant, so a singular input yields a huge-but-finite
  inverse rather than a division by zero.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: transform the four source corners forward and
    take their min/max to bound the destination region to visit.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: for each destination scanline, inverse-map each
    pixel into the source, interpolate, and composite "over".
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    if (status == MagickFalse)
      continue;  /* cannot break out of an OpenMP loop; skip instead */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* scanline does not intersect the transformed source */
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5));
         x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      /*
        Inverse-map the destination pixel into source coordinates and
        interpolate the source there.
      */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w B o u n d i n g R e c t a n g l e s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI unless the draw info carries a density geometry.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        resolution.x=geometry_info.rho;
      resolution.y=resolution.x;
      if ((flags & SigmaValue) != 0)
        resolution.y=geometry_info.sigma;
    }
  /*
    Half the resolution-scaled stroke width: the margin by which edge
    bounds are expanded before clamping to the image.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounding boxes. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /* Expand by the stroke mid-width and clamp to the image extent. */
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* One rectangle per edge: red for down edges, green for up edges. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* Loop exited early on error. */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* Overall bounding rectangle, stroked in blue. */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /* Look up the clip-path MVG stored as an image artifact under this id. */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
    exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  /* Install the rendered mask as the image's write mask; the image owns a
     clone, so destroy our copy. */
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w C l i p p i n g M a s k                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(draw_info != (const DrawInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Render onto a fresh canvas the same size as the target image. */
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* Render the clip path in opaque white on the transparent canvas. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Reduce the rendering to its alpha channel and invert it so drawn areas
     pass through the mask. */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask == (Image *) NULL)
    status=MagickFalse;
  else
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status&=NegateImage(clip_mask,MagickFalse,exception);
    }
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  /* Returns NULL on failure (DestroyImage() returns NULL). */
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w C o m p o s i t e M a s k                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(draw_info != (const DrawInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); composite_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(composite_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(composite_mask)); status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL, exception); status=QueryColorCompliance("#0000",AllCompliance, &composite_mask->background_color,exception); composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha; composite_mask->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(composite_mask,exception); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,mask_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; status=RenderMVGContent(composite_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(composite_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { composite_mask=DestroyImage(composite_mask); composite_mask=separate_mask; status=NegateImage(composite_mask,MagickFalse,exception); if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); } if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path"); return(composite_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r 
a w   D a s h   P o l y g o n                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  double
    dx,
    dy;

  ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /* Count vertices up to the UndefinedPrimitive terminator. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /* Consume the dash offset: advance n (pattern index) and reduce the
     remaining length of the first visible dash segment. */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /* Walk each polygon segment, alternating pen-down (even n) and pen-up
     (odd n) pattern entries. */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > (double) (MaxBezierCoordinates >> 2))
      continue;
    if (fabs(length) < MagickEpsilon)
      {
        /* Current dash exhausted exactly at a vertex: advance the pattern. */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
           (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* Gap segment: restart the dash polygon at the gap's end point. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* Dash segment: append its end point and stroke the piece. */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unconsumed remainder of this segment into the next one. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /* Flush the final partial dash. */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w G r a d i e n t I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double GetStopColorOffset(const GradientInfo *gradient, const ssize_t x,const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo *gradient_vector; gradient_vector=(&gradient->gradient_vector); p.x=gradient_vector->x2-gradient_vector->x1; p.y=gradient_vector->y2-gradient_vector->y1; q.x=(double) x-gradient_vector->x1; q.y=(double) y-gradient_vector->y1; length=sqrt(q.x*q.x+q.y*q.y); gamma=sqrt(p.x*p.x+p.y*p.y)*length; gamma=PerceptibleReciprocal(gamma); scale=p.x*q.x+p.y*q.y; offset=gamma*scale*length; return(offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x=(double) x-gradient->center.x; v.y=(double) y-gradient->center.y; return(sqrt(v.x*v.x+v.y*v.y)); } v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians( gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.x); v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians( gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.y); return(sqrt(v.x*v.x+v.y*v.y)); } } return(0.0); } static int StopInfoCompare(const void *x,const void *y) { StopInfo *stop_1, *stop_2; stop_1=(StopInfo *) x; stop_2=(StopInfo *) y; if (stop_1->offset > stop_2->offset) return(1); if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon) return(0); return(-1); } MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info,ExceptionInfo *exception) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. 
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(draw_info != (const DrawInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  gradient=(&draw_info->gradient);
  /* Stops must be sorted by offset for the interval search below. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  /* NOTE(review): the loops run y < bounding_box.height and
     x < bounding_box.width — height/width appear to be used as end
     coordinates, not extents; confirm against how bounding_box is filled. */
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Outside [0,1] the gradient clamps to the first/last stop. */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Linear blend between the two surrounding stops. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Mirror the gradient on every other repetition. */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /* Tile the gradient; antialias the seam between repetitions. */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat*PerceptibleReciprocal(gradient->radius);
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* On the seam, blend first and last stop directly. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* CheckPrimitiveExtent() grows the primitive-info array referenced by
   mvg_info so it can hold at least `pad' more entries past the current
   offset.  Returns MagickTrue if storage is sufficient (or was grown);
   on allocation failure it throws, installs a minimal replacement buffer
   so callers can unwind safely, and returns MagickFalse. */
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const double pad)
{
  char
    *text = (char *) NULL;

  double
    extent;

  size_t
    quantum;

  ssize_t
    i;

  /*
    Check if there is enough storage for drawing primitives.
  */
  quantum=sizeof(**mvg_info->primitive_info);
  extent=(double) mvg_info->offset+pad+(PrimitiveExtentPad+1)*(double) quantum;
  if (extent <= (double) *mvg_info->extent)
    return(MagickTrue);
  /* Computed as a double so overflow/NaN can be detected before casting. */
  if ((extent >= (double) MAGICK_SSIZE_MAX) || (IsNaN(extent) != 0))
    return(MagickFalse);
  /* Remember any text payload so it can be reattached if realloc fails. */
  for (i=0; i < mvg_info->offset; i++)
    if (((*mvg_info->primitive_info)[i].primitive == TextPrimitive) ||
        ((*mvg_info->primitive_info)[i].primitive == ImagePrimitive))
      if ((*mvg_info->primitive_info)[i].text != (char *) NULL)
        text=(*mvg_info->primitive_info)[i].text;
  *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
    *mvg_info->primitive_info,(size_t) (extent+1),quantum);
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    {
      *mvg_info->extent=(size_t) extent;
      /* Initialize the newly added tail entries. */
      for (i=mvg_info->offset+1; i <= (ssize_t) extent; i++)
      {
        (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
        (*mvg_info->primitive_info)[i].text=(char *) NULL;
      }
      return(MagickTrue);
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t)
    (PrimitiveExtentPad+1)*quantum);
  (void) memset(*mvg_info->primitive_info,0,(size_t) ((PrimitiveExtentPad+1)*
    quantum));
  *mvg_info->extent=1;
  (*mvg_info->primitive_info)[0].text=text;
  mvg_info->offset=0;
  return(MagickFalse);
}

/* GetDrawValue() parses a locale-independent double from `string', storing
   the end-of-parse pointer through `sentinal'.
   NOTE(review): the final `sentinal=q;' only reassigns the local parameter
   and has no caller-visible effect — the out-parameter is already updated by
   InterpretLocaleValue() writing through q; looks redundant, confirm. */
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  char
    **magick_restrict q;

  double
    value;

  q=sentinal;
  value=InterpretLocaleValue(string,q);
  sentinal=q;
  return(value);
}

/* Splay-tree comparator: orders MVG macro names as C strings. */
static int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

/* GetMVGMacros() scans an MVG primitive string for named
   push/pop-delimited blocks (e.g. push graphic-context "wheel" ... pop)
   and returns them as a splay tree keyed by macro name, or NULL. */
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* Track push/pop nesting until this block closes (n == 0). */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
*/ (void) GetNextToken(p,&p,extent,token); (void) CopyMagickString(macro,start,(size_t) (end-start)); (void) AddValueToSplayTree(macros,ConstantString(name), ConstantString(macro)); break; } } } } } token=DestroyString(token); macro=DestroyString(macro); return(macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=GetDrawValue(point,&p); return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; const char *p; ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (draw_info->debug 
!= MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); } if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=(size_t) PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (number_points+1),sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) (number_points+1)* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; 
graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; 
break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((graphic_context[n]->render != MagickFalse) && (mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } if (LocaleCompare("currentColor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,mvg_class, &graphic_context[n]->fill_pattern,exception); break; } (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); break; } 
status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*opacity); graphic_context[n]->fill.alpha_trait=BlendPixelTrait; break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { 
ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, 
&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo region; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); region.x=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.y=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) 
ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.width=(size_t) CastDoubleToLong(floor(GetDrawValue( token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) region.width,(double) region.height,(double) region.x,(double) region.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops 
== (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,mvg_class, &graphic_context[n]->stroke_pattern,exception); break; } (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) { (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); break; } status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { 
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*opacity); graphic_context[n]->stroke.alpha_trait=BlendPixelTrait; break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; 
break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) 
|| (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((draw_info->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(BezierQuantum*(double) primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) 
ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case 
PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((draw_info->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine( &graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,(double) graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
  */
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /*
        Free any text payloads still owned by the primitive list before the
        list itself is released.
      */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  if (stops != (StopInfo *) NULL)
    stops=(StopInfo *) RelinquishMagickMemory(stops);
  /*
    Unwind the whole graphic context stack, index 0 included.
  */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
      keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  DrawImage() renders the MVG primitives of draw_info on the image; it is a
  thin public wrapper around RenderMVGContent() at recursion depth 0.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  return(RenderMVGContent(image,draw_info,0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: the rendered pattern image is returned here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The pattern path and its geometry are stored as image artifacts keyed by
    the pattern name ("<name>" and "<name>-geometry"); both must be present.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  /*
    Create the pattern canvas sized by the geometry artifact, with a fully
    transparent background.
  */
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the pattern MVG into the canvas with a cloned draw info; drop any
    inherited fill/stroke patterns so they cannot recurse into this pattern.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w P o l y g o n P r i m i t i v e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyPolygonTLS() releases the per-thread polygon array; the slot count
  is bounded by the thread resource limit used to allocate it.
*/
static PolygonInfo **DestroyPolygonTLS(PolygonInfo **polygon_info)
{
  ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}

/*
  AcquirePolygonTLS() allocates one PolygonInfo slot per thread and fills
  slot 0 by converting the primitive to a path and then to a polygon; the
  remaining slots stay NULL until ClonePolygonEdgesTLS() populates them.
  Returns NULL on allocation failure.
*/
static PolygonInfo **AcquirePolygonTLS(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info,exception); if (path_info == (PathInfo *) NULL) return(DestroyPolygonTLS(polygon_info)); polygon_info[0]=ConvertPathToPolygon(path_info,exception); if (polygon_info[0] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonTLS(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static MagickBooleanType ClonePolygonEdgesTLS(PolygonInfo **polygon_info, const size_t number_threads,ExceptionInfo *exception) { ssize_t i; for (i=1; i < (ssize_t) number_threads; i++) { EdgeInfo *edge_info; ssize_t j; polygon_info[i]=(PolygonInfo *) AcquireMagickMemory( sizeof(*polygon_info[i])); if (polygon_info[i] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } polygon_info[i]->number_edges=0; edge_info=polygon_info[0]->edges; polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory( polygon_info[0]->number_edges,sizeof(*edge_info)); if (polygon_info[i]->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } (void) memcpy(polygon_info[i]->edges,edge_info, polygon_info[0]->number_edges*sizeof(*edge_info)); for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) polygon_info[i]->edges[j].points=(PointInfo *) NULL; polygon_info[i]->number_edges=polygon_info[0]->number_edges; for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) { edge_info=polygon_info[0]->edges+j; polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory( edge_info->number_points,sizeof(*edge_info)); if (polygon_info[i]->edges[j].points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); 
          return(MagickFalse);
        }
      (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
        edge_info->number_points*sizeof(*edge_info->points));
    }
  }
  return(MagickTrue);
}

/*
  DestroyEdge() releases the point list of edge `edge' and compacts the edge
  table over the freed slot.  Returns the new edge count.  Called from the
  scanline loop to drop edges that lie entirely above the current y.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
  assert(edge < (ssize_t) polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < (ssize_t) polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
  GetFillAlpha() returns the fill opacity in [0,1] for pixel (x,y) and
  stores the stroke opacity in *stroke_alpha.  `mid' is half the stroke
  width in device space.  Coverage is computed from the squared distance of
  (x,y) to each nearby edge segment; interior coverage then uses either the
  even-odd or the non-zero winding rule, per fill_rule.  NOTE(review): this
  prunes expired edges via DestroyEdge(), so the per-thread polygon_info is
  mutated — callers must not share one instance across threads.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  const PointInfo
    *q;

  EdgeInfo
    *p;

  PointInfo
    delta;

  ssize_t
    i,
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges are y-sorted: once one starts below the scanline, all do. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* Edge fully above the scanline: drop it from this thread's copy. */
        p--;
        (void) DestroyEdge(polygon_info,j--);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* Resume at the highwater mark cached by a previous scanline. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);  /* segLen*point-cos(theta) */
      if (beta <= 0.0)
        {
          /*
            Cosine <= 0, point is closest to q.
          */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;  /* segLen*segLen */
          if (beta >= alpha)
            {
              /*
                Point is closest to q+1.
              */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /*
                Point is closest to point between q & q+1.
              */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.  `distance' is still squared here;
        sqrt() is taken only inside the anti-aliasing band.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)  /* NOTE(review): unreachable, filtered above */
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* Pixel is right of the whole edge: the edge crosses the ray. */
        winding_number+=p->direction != 0 ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross product sign decides which side of the segment (x,y) lies on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction != 0 ?
1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { typedef struct _ExtentInfo { ssize_t x1, y1, x2, y2; } ExtentInfo; CacheView *image_view; const char *artifact; double mid; EdgeInfo *p; ExtentInfo poly_extent; MagickBooleanType fill, status; PolygonInfo **magick_restrict polygon_info; SegmentInfo bounds; size_t number_threads; ssize_t i, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonTLS(primitive_info,exception); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; bounds=polygon_info[0]->edges[0].bounds; artifact=GetImageArtifact(image,"draw:render-bounding-rectangles"); if (IsStringTrue(artifact) != MagickFalse) (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonTLS(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? 
(double) image->rows-1.0 : bounds.y2; poly_extent.x1=CastDoubleToLong(ceil(bounds.x1-0.5)); poly_extent.y1=CastDoubleToLong(ceil(bounds.y1-0.5)); poly_extent.x2=CastDoubleToLong(floor(bounds.x2+0.5)); poly_extent.y2=CastDoubleToLong(floor(bounds.y2+0.5)); number_threads=GetMagickNumberThreads(image,image,poly_extent.y2- poly_extent.y1+1,1); status=ClonePolygonEdgesTLS(polygon_info,number_threads,exception); if (status == MagickFalse) { polygon_info=DestroyPolygonTLS(polygon_info); return(status); } image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ num_threads(number_threads) #endif for (y=poly_extent.y1; y <= poly_extent.y2; y++) { PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; x=poly_extent.x1; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (poly_extent.x2- x+1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= poly_extent.x2; x++) { if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) && (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5)))) { GetFillColor(draw_info,x-poly_extent.x1,y-poly_extent.y1,&pixel, exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonTLS(polygon_info); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ num_threads(number_threads) #endif for (y=poly_extent.y1; y <= poly_extent.y2; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,poly_extent.x1,y,(size_t) (poly_extent.x2-poly_extent.x1+1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=poly_extent.x1; x <= poly_extent.x2; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0; } GetFillColor(draw_info,x-poly_extent.x1,y-poly_extent.y1,&fill_color, exception); CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-poly_extent.x1,y-poly_extent.y1,&stroke_color, exception); CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q, (double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonTLS(polygon_info); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. 
% % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" }; PointInfo p, point, q; ssize_t i, x; ssize_t coordinates, y; x=CastDoubleToLong(ceil(primitive_info->point.x-0.5)); y=CastDoubleToLong(ceil(primitive_info->point.y-0.5)); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if 
/* NOTE(review): tail of LogPrimitiveInfo() -- its head lies before this
   chunk.  Logs each vertex (flagging consecutive duplicates), then reports
   whether the subpath ended closed ("last") or open. */
    ((fabs(q.x-point.x) >= MagickEpsilon) ||
     (fabs(q.y-point.y) >= MagickEpsilon))
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
  else
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
  q=point;
  coordinates--;
  if (coordinates > 0)
    continue;
  if ((fabs(p.x-point.x) >= MagickEpsilon) ||
      (fabs(p.y-point.y) >= MagickEpsilon))
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
      (double) coordinates);
  else
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
      (double) coordinates);
  }
}

/*
  DrawPrimitive() renders one graphic primitive (alpha/color fill,
  composited image, point, text, or a general polygon/path) on the image.

    o image: the image to draw on.
    o draw_info: rendering attributes (fill, stroke, compliance, masks, ...).
    o primitive_info: the primitive's coordinate list, terminated by an
      entry whose primitive field is UndefinedPrimitive.
    o exception: errors are reported here.

  Returns MagickTrue only if every sub-operation succeeded (status is a
  bit-AND accumulator over the MagickBooleanType results).
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  ssize_t
    i,
    x;

  ssize_t
    y;

  if (draw_info->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /* Drawing a non-gray fill/stroke on a grayscale image forces sRGB. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /* Anchor pixel of the primitive.  NOTE: x and y are intentionally reused
     (clobbered) as loop counters by the Replace/Reset methods below. */
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          /* Set the alpha of the single anchor pixel. */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /* Set alpha on every pixel fuzzily matching the anchor color. */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /* Fill until we hit the border color, not the seed color. */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* Restrict the flood fill to the alpha channel only. */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /* Apply the fill alpha to every pixel of the image. */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          /* Paint the single anchor pixel with the fill color. */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /* Repaint every pixel fuzzily matching the anchor color. */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /* Fill until we hit the border color, not the seed color. */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /* Repaint every pixel of the image with the fill color. */
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            MagickBooleanType
              path_status;

            struct stat
              attributes;

            /*
              Read composite image.
            */
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            (void) SetImageInfo(clone_info,1,exception);
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            if (clone_info->size != (char *) NULL)
              clone_info->size=DestroyString(clone_info->size);
            if (clone_info->extract != (char *) NULL)
              clone_info->extract=DestroyString(clone_info->extract);
            /* Refuse character devices and (unless whitelisted ftp/http/
               https schemes are NOT in play) remote reads. */
            path_status=GetPathAttributes(clone_info->filename,&attributes);
            if (path_status != MagickFalse)
              {
                if (S_ISCHR(attributes.st_mode) == 0)
                  composite_images=ReadImage(clone_info,exception);
                else
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    FileOpenError,"UnableToOpenFile","`%s'",
                    clone_info->filename);
              }
            else
              if ((LocaleCompare(clone_info->magick,"ftp") != 0) &&
                  (LocaleCompare(clone_info->magick,"http") != 0) &&
                  (LocaleCompare(clone_info->magick,"https") != 0))
                composite_images=ReadImage(clone_info,exception);
              else
                (void) ThrowMagickException(exception,GetMagickModule(),
                  FileOpenError,"UnableToOpenFile","`%s'",clone_info->filename);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      /* Second coordinate carries the requested width/height (0 = keep). */
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* Plain over-composite uses the affine path; anything else goes
         through the generic compositor. */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      /* Delegate to the annotation engine at the primitive's anchor. */
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon: fill first with a stroke-less clone, then
            overlay the dashed stroke.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            point_x,
            point_y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          point_x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          point_y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((point_x < MagickEpsilon) && (point_y < MagickEpsilon))
            closed_path=MagickTrue;
          /* Round-capped or closed round-join paths (and multi-subpath
             lists) can be rendered directly as a polygon. */
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /* Otherwise fill with a stroke-less clone, then stroke. */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /* Release the temporary SVG clip/composite masks. */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   D r a w S t r o k e P o l y g o n                                     %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
%  the image while respecting the line cap and join attributes.
% % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % */ static MagickBooleanType DrawRoundLinecap(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { PrimitiveInfo linecap[5]; ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*MagickEpsilon; linecap[2].point.x+=2.0*MagickEpsilon; linecap[2].point.y+=2.0*MagickEpsilon; linecap[3].point.y+=2.0*MagickEpsilon; linecap[4].primitive=UndefinedPrimitive; return(DrawPolygonPrimitive(image,draw_info,linecap,exception)); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; const PrimitiveInfo *p, *q; /* Draw stroked polygon. 
*/ if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { if (p->coordinates == 1) continue; stroke_polygon=TraceStrokePolygon(draw_info,p,exception); if (stroke_polygon == (PrimitiveInfo *) NULL) { status=0; break; } status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception); stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); if (status == 0) break; q=p+p->coordinates-1; closed_path=p->closed_subpath; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { status&=DrawRoundLinecap(image,draw_info,p,exception); status&=DrawRoundLinecap(image,draw_info,q,exception); } } clone_info=DestroyDrawInfo(clone_info); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-stroke-polygon"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A f f i n e M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the identity % matrix. % % The format of the GetAffineMatrix method is: % % void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of each parameter follows: % % o affine_matrix: the affine matrix. 
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* Identity: unit scale, no rotation/shear, no translation. */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   G e t D r a w I n f o                                                 %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: "#000F" (opaque black) fill, "#FFF0" (transparent white)
     stroke. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  /* Enable debug logging only when draw/annotate events are traced. */
  draw_info->debug=(GetLogEventMask() & (DrawEvent | AnnotateEvent)) != 0 ?
    MagickTrue : MagickFalse;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* Override defaults from image options, when present. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* Accept either a symbolic weight (e.g. "bold") or a number. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   P e r m u t a t e                                                     %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n:
%
%    o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  ssize_t
    i;

  /* Computes n!/(k!(n-k)!) == binomial coefficient C(n,k) as a double. */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                         %
%                                                                         %
%                                                                         %
+   T r a c e P r i m i t i v e                                           %
%                                                                         %
%                                                                         %
%                                                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/
/* Trace an arc between start and end as an ellipse centered at their
   midpoint; degrees selects the angular span. */
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/* Trace an SVG-style elliptical arc path segment: converts the endpoint
   parameterization (start, end, radii, rotation, large-arc/sweep flags)
   to a center parameterization, then approximates the arc with one cubic
   Bezier per quarter turn. */
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* Degenerate cases: coincident endpoints -> point; zero radius -> line. */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /* Radii too small to span the endpoints: scale them up uniformly. */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* Work in the rotated, radius-normalized coordinate frame. */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* One cubic Bezier segment per (up to) quarter turn of the arc. */
  arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
    MagickPI+MagickEpsilon)))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* Chain this segment's start to the previous segment's end, then map
       the control points back to image coordinates. */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the final point exactly on the endpoint */
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /* TraceBezier may reallocate the primitive buffer: re-resolve p. */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Propagate the primitive type backwards over all generated points. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/* Trace a Bezier curve of number_coordinates control points as a dense
   polyline, evaluating the curve in Bernstein form. */
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.  The sampling density (quantum) scales with the
    largest coordinate spread of the control polygon.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may reallocate the buffer: re-resolve pointer. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points: Bernstein-basis evaluation with binomial
    coefficients (Permutate) and incremental weight ratio.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Propagate the primitive type backwards over all generated points. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

/* Trace a circle centered at start whose radius is the distance to end,
   as a full 0..360 degree ellipse with equal radii. */
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

/* Trace an ellipse (or elliptical arc from arc.x to arc.y degrees) as a
   short-segment polyline sampled at an adaptive angular step. */
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /* Finer angular step for larger radii (bounded by pi/8). */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the arc sweeps forward */
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Emit the exact terminal angle so the arc ends precisely. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* Propagate the primitive type backwards over all generated points. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/* Trace a straight line; coincident endpoints collapse to a point. */
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
    {
      /* Degenerate line: collapse to a single point primitive. */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

/*
  TracePath(): parse an SVG path data string (moveto/lineto/curveto/arc/
  closepath commands) into point primitives.  Returns the total number of
  coordinates emitted, or -1 on error.  Upper-case commands use absolute
  coordinates; lower-case use coordinates relative to the current point.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent] = "";

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  PrimitiveInfo
    *q;

  ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry x-rotation large-arc-flag sweep-flag x y.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ?
y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: two control points then the end point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* Horizontal line to. */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: close out the current subpath's bookkeeping first.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* The first pair is the subpath start; extras are implicit linetos. */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: one control point then the end point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ?
y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve (smooth): first control point reflects the
          previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* No preceding curve to reflect: control points degenerate. */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve (smooth): control point reflects the
          previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: return to the subpath start and finalize the subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /* Finalize the last (possibly open) subpath. */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    /* Multiple closed subpaths require border fill to handle holes. */
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}

/*
  TraceRectangle(): emit a closed 5-point rectangle from two opposite
  corners.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceRoundRectangle(): emit a closed rectangle with elliptical corners
  (four quarter-ellipses joined by straight edges).
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* Degenerate rectangle: emit nothing. */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* Clamp corner radii to half the rectangle dimensions. */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* Four quarter-ellipse corners, clockwise from top-right. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* Close the path back to the first emitted point. */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceSquareLinecap(): extend both endpoints of a polyline outward by
  `offset` along the first/last non-degenerate segment direction, producing
  square line caps.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  double
    dx,
    dy;

  ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* Find the first vertex that differs from vertex 0. */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* Likewise from the last vertex, scanning backward. */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}

/*
  TraceStrokePolygon(): build the outline polygon of a stroked path,
  honoring the draw_info line join (bevel/miter/round) and line cap
  settings.  Returns a newly allocated primitive array (caller frees), or
  NULL on allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
  if ((pad_p) > MaxBezierCoordinates) \
    stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
  else \
    if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
      { \
        if (~extent_p < (pad_p)) \
          stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
        else \
          { \
            extent_p+=(pad_p); \
            stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
              MaxStrokePad,sizeof(*stroke_p)); \
          } \
      } \
  if ((pad_q) > MaxBezierCoordinates) \
    stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
  else \
    if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
      { \
        if (~extent_q < (pad_q)) \
          stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
        else \
          { \
            extent_q+=(pad_q); \
            stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
              MaxStrokePad,sizeof(*stroke_q)); \
          } \
      } \
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
    { \
      if (stroke_p != (PointInfo *) NULL) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      if (stroke_q != (PointInfo *) NULL) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      polygon_primitive=(PrimitiveInfo *) \
        RelinquishMagickMemory(polygon_primitive); \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
      return((PrimitiveInfo *) NULL); \
    } \
}

  typedef struct _StrokeSegment
  {
    double
      p,
      q;
  } StrokeSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *stroke_p,
    *stroke_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  ssize_t
    i;

  size_t
    arc_segments,
    extent_p,
    extent_q,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  StrokeSegment
    dx = {0.0, 0.0},
    dy = {0.0, 0.0},
    inverse_slope = {0.0, 0.0},
    slope = {0.0, 0.0},
    theta = {0.0, 0.0};

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PrimitiveInfo *) NULL);
    }
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
  offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
  closed_path=(fabs(offset.x) < MagickEpsilon) &&
    (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
  /* Closed paths with round/miter joins wrap around to the second vertex. */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  extent_p=2*number_vertices;
  extent_q=2*number_vertices;
  stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
    sizeof(*stroke_p));
  stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
    sizeof(*stroke_q));
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
    {
      if (stroke_p != (PointInfo *) NULL)
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      if (stroke_q != (PointInfo *) NULL)
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *)
        RelinquishMagickMemory(polygon_primitive);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PrimitiveInfo *) NULL);
    }
  /* Near-vertical/horizontal segments get clamped pseudo-infinite slopes. */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ?
1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0*PerceptibleReciprocal(slope.p));
      }
  /* Half the stroke width, in device space. */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* Perpendicular offset of the first segment's two parallel edges. */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /*
    NOTE(review): the p/q counters look swapped relative to the
    stroke_p[p]/stroke_q[q] pairing used throughout the loop below; the net
    effect here is identical since both counters are 0 and both end up 1 —
    confirm intent before "fixing".
  */
  stroke_q[p++]=box_q[0];
  stroke_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* Skip sub-half-pixel segments. */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0*PerceptibleReciprocal(slope.q));
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* Intersection of the two offset edges (parallel case degenerates). */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    DisableMSCWarning(4127)
    CheckPathExtent(MaxStrokePad,MaxStrokePad);
    RestoreMSCWarning
    /* Sign of the cross product selects which side turns outward. */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_q[q++]=box_q[1];
          stroke_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.
            q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid))))));
          DisableMSCWarning(4127)
          CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
          RestoreMSCWarning
          stroke_q[q].x=box_q[1].x;
          stroke_q[q].y=box_q[1].y;
          q++;
          /* Approximate the round join with short arc chords. */
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          stroke_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_p[p++]=box_p[1];
          stroke_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p-
            theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid)))))));
          DisableMSCWarning(4127)
          CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
          RestoreMSCWarning
          stroke_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          stroke_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* Advance: current segment becomes the previous one. */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  stroke_p[p++]=box_p[1];
  stroke_q[q++]=box_q[1];
  /*
    Trace stroked polygon: p-side forward, then q-side in reverse.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return(stroke_polygon);
    }
  for (i=0; i < (ssize_t) p; i++)
  {
    stroke_polygon[i]=polygon_primitive[0];
    stroke_polygon[i].point=stroke_p[i];
  }
  if (closed_path != MagickFalse)
    {
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
    }
  for ( ; i < (ssize_t) (p+q+closed_path); i++)
  {
    stroke_polygon[i]=polygon_primitive[0];
    stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
  }
  if (closed_path != MagickFalse)
    {
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
      i++;
    }
  stroke_polygon[i]=polygon_primitive[0];
  stroke_polygon[i].point=stroke_polygon[0].point;
  i++;
  stroke_polygon[i].primitive=UndefinedPrimitive;
  stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
  stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
  stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
  polygon_primitive=(PrimitiveInfo *)
    RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
/* ==== file boundary: parallel-inl.h ==== */
// Copyright (c) 2018 Doyub Kim // // I am making my contributions/submissions to this project solely in my // personal capacity and am not conveying any rights to any intellectual // property of any third parties. #ifndef INCLUDE_JET_DETAIL_PARALLEL_INL_H_ #define INCLUDE_JET_DETAIL_PARALLEL_INL_H_ #include <jet/constants.h> #include <jet/macros.h> #include <algorithm> #include <functional> #include <future> #include <vector> #ifdef JET_TASKING_TBB #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/parallel_sort.h> #include <tbb/task.h> #elif defined(JET_TASKING_CPP11THREADS) #include <thread> #endif namespace jet { namespace internal { // NOTE - This abstraction takes a lambda which should take captured // variables by *value* to ensure no captured references race // with the task itself. template <typename TASK_T> inline void schedule(TASK_T&& fcn) { #ifdef JET_TASKING_TBB struct LocalTBBTask : public tbb::task { TASK_T func; tbb::task* execute() override { func(); return nullptr; } LocalTBBTask(TASK_T&& f) : func(std::forward<TASK_T>(f)) {} }; auto* tbb_node = new (tbb::task::allocate_root()) LocalTBBTask(std::forward<TASK_T>(fcn)); tbb::task::enqueue(*tbb_node); #elif defined(JET_TASKING_CPP11THREADS) std::thread thread(fcn); thread.detach(); #else // OpenMP or Serial --> synchronous! fcn(); #endif } template <typename TASK_T> using operator_return_t = typename std::result_of<TASK_T()>::type; // NOTE - see above, same issues associated with schedule() template <typename TASK_T> inline auto async(TASK_T&& fcn) -> std::future<operator_return_t<TASK_T>> { using package_t = std::packaged_task<operator_return_t<TASK_T>()>; auto task = new package_t(std::forward<TASK_T>(fcn)); auto future = task->get_future(); schedule([=]() { (*task)(); delete task; }); return future; } // Adopted from: // Radenski, A. // Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and // Clustered SMPs. 
Proc PDPTA'11, the 2011 International Conference on Parallel // and Distributed Processing Techniques and Applications, CSREA Press // (H. Arabnia, Ed.), 2011, pp. 367 - 373. template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void merge(RandomIterator a, size_t size, RandomIterator2 temp, CompareFunction compareFunction) { size_t i1 = 0; size_t i2 = size / 2; size_t tempi = 0; while (i1 < size / 2 && i2 < size) { if (compareFunction(a[i1], a[i2])) { temp[tempi] = a[i1]; i1++; } else { temp[tempi] = a[i2]; i2++; } tempi++; } while (i1 < size / 2) { temp[tempi] = a[i1]; i1++; tempi++; } while (i2 < size) { temp[tempi] = a[i2]; i2++; tempi++; } // Copy sorted temp array into main array, a parallelFor(kZeroSize, size, [&](size_t i) { a[i] = temp[i]; }); } template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void parallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp, unsigned int numThreads, CompareFunction compareFunction) { if (numThreads == 1) { std::sort(a, a + size, compareFunction); } else if (numThreads > 1) { std::vector<std::future<void>> pool; pool.reserve(2); auto launchRange = [compareFunction](RandomIterator begin, size_t k2, RandomIterator2 temp, unsigned int numThreads) { parallelMergeSort(begin, k2, temp, numThreads, compareFunction); }; pool.emplace_back(internal::async( [=]() { launchRange(a, size / 2, temp, numThreads / 2); })); pool.emplace_back(internal::async([=]() { launchRange(a + size / 2, size - size / 2, temp + size / 2, numThreads - numThreads / 2); })); // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } merge(a, size, temp, compareFunction); } } } // namespace internal template <typename RandomIterator, typename T> void parallelFill(const RandomIterator& begin, const RandomIterator& end, const T& value, ExecutionPolicy policy) { auto diff = end - begin; if (diff <= 0) { return; } size_t size = static_cast<size_t>(diff); 
parallelFor(kZeroSize, size, [begin, value](size_t i) { begin[i] = value; }, policy); } // Adopted from http://ideone.com/Z7zldb template <typename IndexType, typename Function> void parallelFor(IndexType start, IndexType end, const Function& func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(start, end, func); } else { for (auto i = start; i < end; ++i) { func(i); } } #elif JET_TASKING_CPP11THREADS // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // [Helper] Inner loop auto launchRange = [&func](IndexType k1, IndexType k2) { for (IndexType k = k1; k < k2; k++) { func(k); } }; // Create pool and launch jobs std::vector<std::thread> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(launchRange, i1, i2); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(launchRange, i1, end); } // Wait for jobs to finish for (std::thread& t : pool) { if (t.joinable()) { t.join(); } } #else #ifdef JET_TASKING_OPENMP if (policy == ExecutionPolicy::kParallel) { #pragma omp parallel for #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) for (ssize_t i = start; i < ssize_t(end); ++i) { #else // !MSVC || Intel for (auto i = start; i < end; ++i) { #endif // MSVC && !Intel func(i); } } else { for (auto i = start; i < end; ++i) { func(i); } } #else // JET_TASKING_OPENMP for (auto i = start; i < end; ++i) { func(i); } #endif // JET_TASKING_OPENMP #endif } template <typename IndexType, 
typename Function> void parallelRangeFor(IndexType start, IndexType end, const Function& func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(tbb::blocked_range<IndexType>(start, end), [&func](const tbb::blocked_range<IndexType>& range) { func(range.begin(), range.end()); }); } else { func(start, end); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(internal::async([=]() { func(i1, i2); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(internal::async([=]() { func(i1, end); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } #endif } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { parallelFor(beginIndexY, endIndexY, [&](IndexType j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j); } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { parallelRangeFor(beginIndexY, endIndexY, [&](IndexType jBegin, IndexType jEnd) { function(beginIndexX, endIndexX, jBegin, jEnd); 
}, policy); } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { parallelFor(beginIndexZ, endIndexZ, [&](IndexType k) { for (IndexType j = beginIndexY; j < endIndexY; ++j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j, k); } } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { parallelRangeFor(beginIndexZ, endIndexZ, [&](IndexType kBegin, IndexType kEnd) { function(beginIndexX, endIndexX, beginIndexY, endIndexY, kBegin, kEnd); }, policy); } template <typename IndexType, typename Value, typename Function, typename Reduce> Value parallelReduce(IndexType start, IndexType end, const Value& identity, const Function& func, const Reduce& reduce, ExecutionPolicy policy) { if (start > end) { return identity; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { return tbb::parallel_reduce( tbb::blocked_range<IndexType>(start, end), identity, [&func](const tbb::blocked_range<IndexType>& range, const Value& init) { return func(range.begin(), range.end(), init); }, reduce); } else { (void)reduce; return func(start, end, identity); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Results std::vector<Value> results(numThreads, identity); // [Helper] Inner loop auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid) { results[tid] = func(k1, k2, identity); }; // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); unsigned int tid = 0; for (; tid + 1 < numThreads && i1 < end; ++tid) { pool.emplace_back(internal::async([=]() { launchRange(i1, i2, tid); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back( internal::async([=]() { launchRange(i1, end, tid); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } // Gather Value finalResult = identity; for (const Value& val : results) { finalResult = reduce(val, finalResult); } return finalResult; #endif } template <typename RandomIterator, typename CompareFunction> void parallelSort(RandomIterator begin, RandomIterator end, CompareFunction compareFunction, ExecutionPolicy policy) { if (end < begin) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_sort(begin, end, compareFunction); } else { std::sort(begin, end, compareFunction); } #else size_t size = static_cast<size_t>(end - begin); typedef typename std::iterator_traits<RandomIterator>::value_type value_type; std::vector<value_type> temp(size); // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; internal::parallelMergeSort(begin, size, temp.begin(), numThreads, compareFunction); #endif } template <typename RandomIterator> void parallelSort(RandomIterator begin, RandomIterator end, ExecutionPolicy policy) { parallelSort( begin, end, std::less<typename std::iterator_traits<RandomIterator>::value_type>(), policy); } } // namespace jet #endif // INCLUDE_JET_DETAIL_PARALLEL_INL_H_
distance.c
#include "distance.h"
#include <math.h>

/*
 * Selects the distance function used by the routines below.
 * metric 0 = squared euclidian distance, 1 = cosine "distance".
 * Returns 1 on success, 0 for an unknown metric id.
 */
int set_distance_metric(int metric) {
    switch (metric) {
        case 0:
            distance_metric = euclidian_distance;
            break;
        case 1:
            distance_metric = cosine_distance;
            break;
        default:
            return 0;
    }
    return 1;
}

/*
 * Assigns centroids to each datapoint
 */
void assign_centroids(double *data, double *centroids, int *assignments, int k, int N, int D) {
    assign_centroids_multi(data, centroids, assignments, 1, k, N, D);
}

/*
 * Assigns centroids to each datapoint using multiple threads
 */
void assign_centroids_multi(double *data, double *centroids, int *assignments, int n_jobs, int k, int N, int D) {
    /* Bug fix: the original "#pragma omp parallel" made every thread in the
     * team execute the *entire* loop redundantly.  "parallel for" shares the
     * N iterations among the n_jobs threads; iterations are independent, so
     * no further synchronization is needed. */
    #pragma omp parallel for shared(data, centroids, assignments) num_threads(n_jobs)
    for(int i=0; i<N; i++) {
        assignments[i] = closest_centroid(data + i*D, centroids, k, D);
    }
}

/*
 * Returns the squared euclidian distance between vectors A and B of length D
 */
double euclidian_distance(double *A, double *B, int D) {
    double d = 0.0;
    double dx = 0.0;
    for (int i=0; i<D; i++) {
        dx = (*A++) - (*B++);
        d += dx * dx;
    }
    return d;
}

/*
 * Returns the cosine distance between vectors A and B of length D
 *
 * NOTE(review): this actually returns the cosine *similarity*
 * (dot(A,B)/(|A||B|)), not 1 - similarity; larger means closer, which
 * inverts the "smaller is closer" convention used by closest_centroid.
 * Behavior kept as-is — confirm intent with callers.  Also divides by zero
 * if either vector is all zeros.
 */
double cosine_distance(double *A, double *B, int D) {
    double dot = 0.0;
    double lenA = 0.0, lenB = 0.0;
    for (int i=0; i<D; i++) {
        dot  += (*A) * (*B);
        lenA += (*A) * (*A);
        lenB += (*B) * (*B);
        A++;
        B++;
    }
    return dot / (sqrt(lenA) * sqrt(lenB));
}

/*
 * Returns the index of the closest centroid to the inputted vector where k is
 * the number of centroids and D is the dimensionality of the space.
 * Returns -1 when k == 0.
 */
int closest_centroid(double *vector, double *centroids, int k, int D) {
    int c = -1;
    /* min_distance is initialized defensively; it is only read after the
     * first iteration has set it (guarded by c == -1). */
    double min_distance = 0.0, cur_distance;
    for(int i=0; i<k; i++) {
        cur_distance = distance_metric(vector, (centroids + i * D), D);
        if (c == -1 || cur_distance < min_distance) {
            c = i;
            min_distance = cur_distance;
        }
    }
    return c;
}

/*
 * Returns the distance to the closest centroid to the inputted vector where k is
 * the number of centroids and D is the dimensionality of the space.
 * Returns -1.0 when k == 0.
 */
double distance_to_closest_centroid(double *vector, double *centroids, int k, int D) {
    double min_distance = -1.0;
    double cur_distance;
    for(int i=0; i<k; i++) {
        cur_distance = distance_metric(vector, (centroids + i * D), D);
        if (min_distance < 0 || cur_distance < min_distance) {
            min_distance = cur_distance;
        }
    }
    return min_distance;
}
libimagequant.c
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** Copyright (C) 1989, 1991 by Jef Poskanzer. ** Copyright (C) 1997, 2000, 2002 by Greg Roelofs; based on an idea by ** Stefan Schneider. ** © 2009-2013 by Kornel Lesinski. ** ** Permission to use, copy, modify, and distribute this software and its ** documentation for any purpose and without fee is hereby granted, provided ** that the above copyright notice appear in all copies and that both that ** copyright notice and this permission notice appear in supporting ** documentation. This software is provided "as is" without express or ** implied warranty. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <limits.h> #if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800) #error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher." #error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version." 
#endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #include "nearest.h" #include "blur.h" #include "viter.h" #define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */ // each structure has a pointer as a unique identifier that allows type checking at run time static const char *const liq_attr_magic = "liq_attr", *const liq_image_magic = "liq_image", *const liq_result_magic = "liq_result", *const liq_remapping_result_magic = "liq_remapping_result", *const liq_freed_magic = "free"; #define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic) #define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr) struct liq_attr { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); double target_mse, max_mse, voronoi_iteration_limit; float min_opaque_val; unsigned int max_colors, max_histogram_entries; unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */; unsigned int voronoi_iterations, feedback_loop_trials; bool last_index_transparent, use_contrast_maps, use_dither_map, fast_palette; unsigned int speed; liq_log_callback_function *log_callback; void *log_callback_user_info; liq_log_flush_callback_function *log_flush_callback; void *log_flush_callback_user_info; }; struct liq_image { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); f_pixel *f_pixels; rgba_pixel **rows; double gamma; unsigned int width, height; unsigned char *noise, *edges, *dither_map; rgba_pixel *pixels, *temp_row; f_pixel *temp_f_row; liq_image_get_rgba_row_callback *row_callback; void *row_callback_user_info; float min_opaque_val; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; bool free_pixels, free_rows, free_rows_internal; }; typedef struct liq_remapping_result { const char 
*magic_header; void* (*malloc)(size_t); void (*free)(void*); unsigned char *pixels; colormap *palette; liq_palette int_palette; double gamma, palette_error; float dither_level; bool use_dither_map; } liq_remapping_result; struct liq_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); liq_remapping_result *remapping; colormap *palette; liq_palette int_palette; float dither_level; double gamma, palette_error; int min_posterization_output; bool use_dither_map, fast_palette; }; static liq_result *pngquant_quantize(histogram *hist, const liq_attr *options, const liq_image *img); static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels); static void contrast_maps(liq_image *image); static histogram *get_histogram(liq_image *input_image, const liq_attr *options); static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row); static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row); static void liq_remapping_result_destroy(liq_remapping_result *result); static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); char buf[required_space]; va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context, buf, context->log_callback_user_info); } } inline static void verbose_print(const liq_attr *attr, const char *msg) { if (attr->log_callback) { attr->log_callback(attr, msg, attr->log_callback_user_info); } } static void liq_verbose_printf_flush(liq_attr *attr) { if (attr->log_flush_callback) { attr->log_flush_callback(attr, attr->log_flush_callback_user_info); } } #if USE_SSE inline static bool is_sse_available() { #if (defined(__x86_64__) || defined(__amd64)) return true; #else int a,b,c,d; cpuid(1, a, b, c, d); return d & (1<<25); // edx bit 25 is set when SSE is present #endif } #endif /* make it clear in backtrace when user-supplied handle points to invalid memory */ NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header); LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header) { if (!user_supplied_pointer) { return false; } if (user_supplied_pointer->magic_header == liq_freed_magic) { fprintf(stderr, "%s used after being freed", expected_magic_header); // this is not normal error handling, this is programmer error that should crash the program. // program cannot safely continue if memory has been used after it's been freed. // abort() is nasty, but security vulnerability may be worse. 
abort(); } return user_supplied_pointer->magic_header == expected_magic_header; } NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(void *pointer); LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(void *pointer) { if (!pointer) { return false; } // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not. // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read. char test_access = *((volatile char *)pointer); return test_access || true; } static void liq_log_error(const liq_attr *attr, const char *msg) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; liq_verbose_printf(attr, " error: %s", msg); } static double quality_to_mse(long quality) { if (quality == 0) { return MAX_DIFF; } if (quality == 100) { return 0; } // curve fudged to be roughly similar to quality of libjpeg // except lowest 10 for really low number of colors const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001); return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0; } static unsigned int mse_to_quality(double mse) { for(int i=100; i > 0; i--) { if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors return i; } } return 0; } LIQ_EXPORT liq_error liq_set_quality(liq_attr* attr, int minimum, int target) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE; attr->target_mse = quality_to_mse(target); attr->max_mse = quality_to_mse(minimum); return LIQ_OK; } LIQ_EXPORT int liq_get_min_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->max_mse); } LIQ_EXPORT int liq_get_max_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->target_mse); } LIQ_EXPORT liq_error 
liq_set_max_colors(liq_attr* attr, int colors) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE; attr->max_colors = colors; return LIQ_OK; } LIQ_EXPORT int liq_get_max_colors(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->max_colors; } LIQ_EXPORT liq_error liq_set_min_posterization(liq_attr *attr, int bits) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE; attr->min_posterization_output = bits; return LIQ_OK; } LIQ_EXPORT int liq_get_min_posterization(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->min_posterization_output; } LIQ_EXPORT liq_error liq_set_speed(liq_attr* attr, int speed) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE; int iterations = MAX(8-speed,0); iterations += iterations * iterations/2; attr->voronoi_iterations = iterations; attr->voronoi_iteration_limit = 1.0/(double)(1<<(23-speed)); attr->feedback_loop_trials = MAX(56-9*speed, 0); attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed); attr->min_posterization_input = (speed >= 8) ? 1 : 0; attr->fast_palette = (speed >= 7); attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 
7 : 5)); // parallelized dither map might speed up floyd remapping attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map; attr->speed = speed; return LIQ_OK; } LIQ_EXPORT int liq_get_speed(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->speed; } LIQ_EXPORT liq_error liq_set_output_gamma(liq_result* res, double gamma) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } res->gamma = gamma; return LIQ_OK; } LIQ_EXPORT liq_error liq_set_min_opacity(liq_attr* attr, int min) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE; attr->min_opaque_val = (double)min/255.0; return LIQ_OK; } LIQ_EXPORT int liq_get_min_opacity(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return MIN(255, 256.0 * attr->min_opaque_val); } LIQ_EXPORT void liq_set_last_index_transparent(liq_attr* attr, int is_last) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->last_index_transparent = !!is_last; } LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; liq_verbose_printf_flush(attr); attr->log_callback = callback; attr->log_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->log_flush_callback = callback; attr->log_flush_callback_user_info = user_info; } LIQ_EXPORT liq_attr* liq_attr_create() { return liq_attr_create_with_allocator(NULL, NULL); } LIQ_EXPORT void liq_attr_destroy(liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return; } liq_verbose_printf_flush(attr); attr->magic_header = liq_freed_magic; 
attr->free(attr); } LIQ_EXPORT liq_attr* liq_attr_copy(liq_attr *orig) { if (!CHECK_STRUCT_TYPE(orig, liq_attr)) { return NULL; } liq_attr *attr = orig->malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = *orig; return attr; } static void *liq_aligned_malloc(size_t size) { unsigned char *ptr = malloc(size + 16); if (!ptr) { return NULL; } uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1] ptr += offset; assert(0 == (((uintptr_t)ptr) & 15)); ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free() return ptr; } static void liq_aligned_free(void *inptr) { unsigned char *ptr = inptr; size_t offset = ptr[-1] ^ 0x59; assert(offset > 0 && offset <= 16); free(ptr - offset); } LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*)) { #if USE_SSE if (!is_sse_available()) { return NULL; } #endif if (!custom_malloc && !custom_free) { custom_malloc = liq_aligned_malloc; custom_free = liq_aligned_free; } else if (!custom_malloc != !custom_free) { return NULL; // either specify both or none } liq_attr *attr = custom_malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = (liq_attr) { .magic_header = liq_attr_magic, .malloc = custom_malloc, .free = custom_free, .max_colors = 256, .min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha) .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles. 
.target_mse = 0, .max_mse = MAX_DIFF, }; liq_set_speed(attr, 3); return attr; } LIQ_EXPORT liq_error liq_image_add_fixed_color(liq_image *img, liq_color color) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (img->fixed_colors_count > 255) return LIQ_BUFFER_TOO_SMALL; float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); img->fixed_colors[img->fixed_colors_count++] = to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return LIQ_OK; } static bool liq_image_use_low_memory(liq_image *img) { img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * img->width * omp_get_max_threads()); return img->temp_f_row != NULL; } static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint) { return img->width * img->height > (low_memory_hint ? LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow } static liq_image *liq_image_create_internal(liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma) { if (gamma < 0 || gamma > 1.0) { liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)"); return NULL; } if (!rows && !row_callback) { liq_log_error(attr, "missing row data"); return NULL; } liq_image *img = attr->malloc(sizeof(liq_image)); if (!img) return NULL; *img = (liq_image){ .magic_header = liq_image_magic, .malloc = attr->malloc, .free = attr->free, .width = width, .height = height, .gamma = gamma ? 
gamma : 0.45455, .rows = rows, .row_callback = row_callback, .row_callback_user_info = row_callback_user_info, .min_opaque_val = attr->min_opaque_val, }; if (!rows || attr->min_opaque_val < 1.f) { img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * width * omp_get_max_threads()); if (!img->temp_row) return NULL; } // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) { verbose_print(attr, " conserving memory"); if (!liq_image_use_low_memory(img)) return NULL; } if (img->min_opaque_val < 1.f) { verbose_print(attr, " Working around IE6 bug by making image less transparent..."); } return img; } LIQ_EXPORT liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) { return LIQ_VALUE_OUT_OF_RANGE; } if (ownership_flags & LIQ_OWN_ROWS) { if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE; img->free_rows = true; } if (ownership_flags & LIQ_OWN_PIXELS) { img->free_pixels = true; if (!img->pixels) { // for simplicity of this API there's no explicit bitmap argument, // so the row with the lowest address is assumed to be at the start of the bitmap img->pixels = img->rows[0]; for(unsigned int i=1; i < img->height; i++) { img->pixels = MIN(img->pixels, img->rows[i]); } } } return LIQ_OK; } static bool check_image_size(const liq_attr *attr, const int width, const int height) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return false; } if (width <= 0 || height <= 0) { liq_log_error(attr, "width and height must be > 0"); return false; } if (width > INT_MAX/height) { liq_log_error(attr, "image too large"); return false; } return true; } LIQ_EXPORT liq_image *liq_image_create_custom(liq_attr *attr, liq_image_get_rgba_row_callback 
*row_callback, void* user_info, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma); } LIQ_EXPORT liq_image *liq_image_create_rgba_rows(liq_attr *attr, void* rows[], int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } for(int i=0; i < height; i++) { if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) { liq_log_error(attr, "invalid row pointers"); return NULL; } } return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma); } LIQ_EXPORT liq_image *liq_image_create_rgba(liq_attr *attr, void* bitmap, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } if (!CHECK_USER_POINTER(bitmap)) { liq_log_error(attr, "invalid bitmap pointer"); return NULL; } rgba_pixel *pixels = bitmap; rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height); if (!rows) return NULL; for(int i=0; i < height; i++) { rows[i] = pixels + width * i; } liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma); if (!image) { attr->free(rows); return NULL; } image->free_rows = true; image->free_rows_internal = true; return image; } NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info); LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info) { assert(callback); assert(temp_row); callback(temp_row, row, width, user_info); } inline static bool liq_image_can_use_rows(liq_image *img) { const bool iebug = img->min_opaque_val < 1.f; return (img->rows && !iebug); } static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row) { if (liq_image_can_use_rows(img)) { return img->rows[row]; } 
assert(img->temp_row); rgba_pixel *temp_row = img->temp_row + img->width * omp_get_thread_num(); if (img->rows) { memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0])); } else { liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info); } if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row); return temp_row; } static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[]) { assert(row_f_pixels); assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15)); const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row); for(unsigned int col=0; col < img->width; col++) { row_f_pixels[col] = to_f(gamma_lut, row_pixels[col]); } } static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row) { if (!img->f_pixels) { if (img->temp_f_row) { float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); f_pixel *row_for_thread = img->temp_f_row + img->width * omp_get_thread_num(); convert_row_to_f(img, row_for_thread, row, gamma_lut); return row_for_thread; } assert(omp_get_thread_num() == 0); if (!liq_image_should_use_low_memory(img, false)) { img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height); } if (!img->f_pixels) { if (!liq_image_use_low_memory(img)) return NULL; return liq_image_get_row_f(img, row); } float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); for(unsigned int i=0; i < img->height; i++) { convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut); } } return img->f_pixels + img->width * row; } LIQ_EXPORT int liq_image_get_width(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->width; } LIQ_EXPORT int liq_image_get_height(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->height; } typedef void free_func(void*); free_func *get_default_free_func(liq_image 
*img) { // When default allocator is used then user-supplied pointers must be freed with free() if (img->free_rows_internal || img->free != liq_aligned_free) { return img->free; } return free; } static void liq_image_free_rgba_source(liq_image *input_image) { if (input_image->free_pixels && input_image->pixels) { get_default_free_func(input_image)(input_image->pixels); input_image->pixels = NULL; } if (input_image->free_rows && input_image->rows) { get_default_free_func(input_image)(input_image->rows); input_image->rows = NULL; } } LIQ_EXPORT void liq_image_destroy(liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return; liq_image_free_rgba_source(input_image); if (input_image->noise) { input_image->free(input_image->noise); } if (input_image->edges) { input_image->free(input_image->edges); } if (input_image->dither_map) { input_image->free(input_image->dither_map); } if (input_image->f_pixels) { input_image->free(input_image->f_pixels); } if (input_image->temp_row) { input_image->free(input_image->temp_row); } if (input_image->temp_f_row) { input_image->free(input_image->temp_f_row); } input_image->magic_header = liq_freed_magic; input_image->free(input_image); } LIQ_EXPORT liq_result *liq_quantize_image(liq_attr *attr, liq_image *img) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return NULL; if (!CHECK_STRUCT_TYPE(img, liq_image)) { liq_log_error(attr, "invalid image pointer"); return NULL; } histogram *hist = get_histogram(img, attr); if (!hist) { return NULL; } liq_result *result = pngquant_quantize(hist, attr, img); pam_freeacolorhist(hist); return result; } LIQ_EXPORT liq_error liq_set_dithering_level(liq_result *res, float dither_level) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } if (res->dither_level < 0 || res->dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE; res->dither_level = dither_level; return LIQ_OK; } 
static liq_remapping_result *liq_remapping_result_create(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) { return NULL; } liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result)); if (!res) return NULL; *res = (liq_remapping_result) { .magic_header = liq_remapping_result_magic, .malloc = result->malloc, .free = result->free, .dither_level = result->dither_level, .use_dither_map = result->use_dither_map, .palette_error = result->palette_error, .gamma = result->gamma, .palette = pam_duplicate_colormap(result->palette), }; return res; } LIQ_EXPORT double liq_get_output_gamma(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; return result->gamma; } static void liq_remapping_result_destroy(liq_remapping_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return; if (result->palette) pam_freecolormap(result->palette); if (result->pixels) result->free(result->pixels); result->magic_header = liq_freed_magic; result->free(result); } LIQ_EXPORT void liq_result_destroy(liq_result *res) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return; memset(&res->int_palette, 0, sizeof(liq_palette)); if (res->remapping) { memset(&res->remapping->int_palette, 0, sizeof(liq_palette)); liq_remapping_result_destroy(res->remapping); } pam_freecolormap(res->palette); res->magic_header = liq_freed_magic; res->free(res); } LIQ_EXPORT double liq_get_quantization_error(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->palette_error >= 0) { return result->palette_error*65536.0/6.0; } if (result->remapping && result->remapping->palette_error >= 0) { return result->remapping->palette_error*65536.0/6.0; } return result->palette_error; } LIQ_EXPORT int liq_get_quantization_quality(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->palette_error >= 0) { return mse_to_quality(result->palette_error); } if (result->remapping && 
result->remapping->palette_error >= 0) { return mse_to_quality(result->remapping->palette_error); } return result->palette_error; } static int compare_popularity(const void *ch1, const void *ch2) { const float v1 = ((const colormap_item*)ch1)->popularity; const float v2 = ((const colormap_item*)ch2)->popularity; return v1 > v2 ? -1 : 1; } static void sort_palette_qsort(colormap *map, int start, int nelem) { qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity); } #define SWAP_PALETTE(map, a,b) { \ const colormap_item tmp = (map)->palette[(a)]; \ (map)->palette[(a)] = (map)->palette[(b)]; \ (map)->palette[(b)] = tmp; } static void sort_palette(colormap *map, const liq_attr *options) { /* ** Step 3.5 [GRR]: remap the palette colors so that all entries with ** the maximal alpha value (i.e., fully opaque) are at the end and can ** therefore be omitted from the tRNS chunk. */ if (options->last_index_transparent) { for(unsigned int i=0; i < map->colors; i++) { if (map->palette[i].acolor.a < 1.0/256.0) { const unsigned int old = i, transparent_dest = map->colors-1; SWAP_PALETTE(map, transparent_dest, old); /* colors sorted by popularity make pngs slightly more compressible */ sort_palette_qsort(map, 0, map->colors-1); return; } } } /* move transparent colors to the beginning to shrink trns chunk */ unsigned int num_transparent=0; for(unsigned int i=0; i < map->colors; i++) { if (map->palette[i].acolor.a < 255.0/256.0) { // current transparent color is swapped with earlier opaque one if (i != num_transparent) { SWAP_PALETTE(map, num_transparent, i); i--; } num_transparent++; } } liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? 
"y" : "ies"); /* colors sorted by popularity make pngs slightly more compressible * opaque and transparent are sorted separately */ sort_palette_qsort(map, 0, num_transparent); sort_palette_qsort(map, num_transparent, map->colors-num_transparent); if (map->colors > 16) { SWAP_PALETTE(map, 7, 1); // slightly improves compression SWAP_PALETTE(map, 8, 2); SWAP_PALETTE(map, 9, 3); } } inline static unsigned int posterize_channel(unsigned int color, unsigned int bits) { return (color & ~((1<<bits)-1)) | (color >> (8-bits)); } static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize) { float gamma_lut[256]; to_f_set_gamma(gamma_lut, gamma); dest->count = map->colors; for(unsigned int x = 0; x < map->colors; ++x) { rgba_pixel px = to_rgb(gamma, map->palette[x].acolor); px.r = posterize_channel(px.r, posterize); px.g = posterize_channel(px.g, posterize); px.b = posterize_channel(px.b, posterize); px.a = posterize_channel(px.a, posterize); map->palette[x].acolor = to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */ if (!px.a) { px.r = 'L'; px.g = 'i'; px.b = 'q'; } dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a}; } } LIQ_EXPORT const liq_palette *liq_get_palette(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL; if (result->remapping && result->remapping->int_palette.count) { return &result->remapping->int_palette; } if (!result->int_palette.count) { set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output); } return &result->int_palette; } static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map, const bool fast) { const int rows = input_image->height; const unsigned int cols = input_image->width; const float min_opaque_val = input_image->min_opaque_val; double remapping_error=0; if 
(!liq_image_get_row_f(input_image, 0)) { // trigger lazy conversion return -1; } struct nearest_map *const n = nearest_init(map, fast); const unsigned int max_threads = omp_get_max_threads(); viter_state average_color[(VITER_CACHE_LINE_GAP+map->colors) * max_threads]; viter_init(map, max_threads, average_color); #pragma omp parallel for if (rows*cols > 3000) \ schedule(static) default(none) shared(average_color) reduction(+:remapping_error) for(int row = 0; row < rows; ++row) { const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); unsigned int last_match=0; for(unsigned int col = 0; col < cols; ++col) { f_pixel px = row_pixels[col]; float diff; output_pixels[row][col] = last_match = nearest_search(n, px, last_match, min_opaque_val, &diff); remapping_error += diff; viter_update_color(px, 1.0, map, last_match, omp_get_thread_num(), average_color); } } viter_finalize(map, max_threads, average_color); nearest_free(n); return remapping_error / (input_image->width * input_image->height); } inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px) { /* Use Floyd-Steinberg errors to adjust actual color. 
*/ const float sr = thiserr.r * dither_level, sg = thiserr.g * dither_level, sb = thiserr.b * dither_level, sa = thiserr.a * dither_level; float ratio = 1.0; // allowing some overflow prevents undithered bands caused by clamping of all channels if (px.r + sr > 1.03) ratio = MIN(ratio, (1.03-px.r)/sr); else if (px.r + sr < 0) ratio = MIN(ratio, px.r/-sr); if (px.g + sg > 1.03) ratio = MIN(ratio, (1.03-px.g)/sg); else if (px.g + sg < 0) ratio = MIN(ratio, px.g/-sg); if (px.b + sb > 1.03) ratio = MIN(ratio, (1.03-px.b)/sb); else if (px.b + sb < 0) ratio = MIN(ratio, px.b/-sb); float a = px.a + sa; if (a > 1.0) { a = 1.0; } else if (a < 0) { a = 0; } // If dithering error is crazy high, don't propagate it that much // This prevents crazy geen pixels popping out of the blue (or red or black! ;) const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa; if (dither_error > max_dither_error) { ratio *= 0.8; } else if (dither_error < 2.f/256.f/256.f) { // don't dither areas that don't have noticeable error — makes file smaller return px; } return (f_pixel){ .r=px.r + sr * ratio, .g=px.g + sg * ratio, .b=px.b + sb * ratio, .a=a, }; } /** Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered. If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image. */ static void remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], const colormap *map, const float max_dither_error, const bool use_dither_map, const bool output_image_is_remapped, float base_dithering_level) { const unsigned int rows = input_image->height, cols = input_image->width; const unsigned char *dither_map = use_dither_map ? (input_image->dither_map ? 
input_image->dither_map : input_image->edges) : NULL; const float min_opaque_val = input_image->min_opaque_val; const colormap_item *acolormap = map->palette; struct nearest_map *const n = nearest_init(map, false); /* Initialize Floyd-Steinberg error vectors. */ f_pixel *restrict thiserr, *restrict nexterr; thiserr = input_image->malloc((cols + 2) * sizeof(*thiserr) * 2); // +2 saves from checking out of bounds access nexterr = thiserr + (cols + 2); srand(12345); /* deterministic dithering is better for comparing results */ if (!thiserr) return; for (unsigned int col = 0; col < cols + 2; ++col) { const double rand_max = RAND_MAX; thiserr[col].r = ((double)rand() - rand_max/2.0)/rand_max/255.0; thiserr[col].g = ((double)rand() - rand_max/2.0)/rand_max/255.0; thiserr[col].b = ((double)rand() - rand_max/2.0)/rand_max/255.0; thiserr[col].a = ((double)rand() - rand_max/2.0)/rand_max/255.0; } // response to this value is non-linear and without it any value < 0.8 would give almost no dithering base_dithering_level = 1.0 - (1.0-base_dithering_level)*(1.0-base_dithering_level)*(1.0-base_dithering_level); if (dither_map) { base_dithering_level *= 1.0/255.0; // convert byte to float } base_dithering_level *= 15.0/16.0; // prevent small errors from accumulating bool fs_direction = true; unsigned int last_match=0; for (unsigned int row = 0; row < rows; ++row) { memset(nexterr, 0, (cols + 2) * sizeof(*nexterr)); unsigned int col = (fs_direction) ? 0 : (cols - 1); const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); do { float dither_level = base_dithering_level; if (dither_map) { dither_level *= dither_map[row*cols + col]; } const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]); const unsigned int guessed_match = output_image_is_remapped ? 
output_pixels[row][col] : last_match; output_pixels[row][col] = last_match = nearest_search(n, spx, guessed_match, min_opaque_val, NULL); const f_pixel xp = acolormap[last_match].acolor; f_pixel err = { .r = (spx.r - xp.r), .g = (spx.g - xp.g), .b = (spx.b - xp.b), .a = (spx.a - xp.a), }; // If dithering error is crazy high, don't propagate it that much // This prevents crazy geen pixels popping out of the blue (or red or black! ;) if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) { dither_level *= 0.75; } const float colorimp = (3.0f + acolormap[last_match].acolor.a)/4.0f * dither_level; err.r *= colorimp; err.g *= colorimp; err.b *= colorimp; err.a *= dither_level; /* Propagate Floyd-Steinberg error terms. */ if (fs_direction) { thiserr[col + 2].a += err.a * (7.f/16.f); thiserr[col + 2].r += err.r * (7.f/16.f); thiserr[col + 2].g += err.g * (7.f/16.f); thiserr[col + 2].b += err.b * (7.f/16.f); nexterr[col + 2].a = err.a * (1.f/16.f); nexterr[col + 2].r = err.r * (1.f/16.f); nexterr[col + 2].g = err.g * (1.f/16.f); nexterr[col + 2].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col ].a += err.a * (3.f/16.f); nexterr[col ].r += err.r * (3.f/16.f); nexterr[col ].g += err.g * (3.f/16.f); nexterr[col ].b += err.b * (3.f/16.f); } else { thiserr[col ].a += err.a * (7.f/16.f); thiserr[col ].r += err.r * (7.f/16.f); thiserr[col ].g += err.g * (7.f/16.f); thiserr[col ].b += err.b * (7.f/16.f); nexterr[col ].a = err.a * (1.f/16.f); nexterr[col ].r = err.r * (1.f/16.f); nexterr[col ].g = err.g * (1.f/16.f); nexterr[col ].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col + 2].a += err.a * (3.f/16.f); nexterr[col + 2].r += err.r * (3.f/16.f); 
nexterr[col + 2].g += err.g * (3.f/16.f); nexterr[col + 2].b += err.b * (3.f/16.f); } // remapping is done in zig-zag if (fs_direction) { ++col; if (col >= cols) break; } else { if (col <= 0) break; --col; } } while(1); f_pixel *const temperr = thiserr; thiserr = nexterr; nexterr = temperr; fs_direction = !fs_direction; } input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped nearest_free(n); } /* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */ static void remove_fixed_colors_from_histogram(histogram *hist, const liq_image *input_image, const float target_mse) { const float max_difference = MAX(target_mse/2.0, 2.0/256.0/256.0); if (input_image->fixed_colors_count) { for(int j=0; j < hist->size; j++) { for(unsigned int i=0; i < input_image->fixed_colors_count; i++) { if (colordifference(hist->achv[j].acolor, input_image->fixed_colors[i]) < max_difference) { hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry j--; break; // continue searching histogram } } } } } /* histogram contains information how many times each color is present in the image, weighted by importance_map */ static histogram *get_histogram(liq_image *input_image, const liq_attr *options) { unsigned int ignorebits=MAX(options->min_posterization_output, options->min_posterization_input); const unsigned int cols = input_image->width, rows = input_image->height; if (!input_image->noise && options->use_contrast_maps) { contrast_maps(input_image); } /* ** Step 2: attempt to make a histogram of the colors, unclustered. ** If at first we don't succeed, increase ignorebits to increase color ** coherence and try again. 
*/ unsigned int maxcolors = options->max_histogram_entries; struct acolorhash_table *acht; const bool all_rows_at_once = liq_image_can_use_rows(input_image); do { acht = pam_allocacolorhash(maxcolors, rows*cols, ignorebits, options->malloc, options->free); if (!acht) return NULL; // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important. // noise map does not include edges to avoid ruining anti-aliasing for(unsigned int row=0; row < rows; row++) { bool added_ok; if (all_rows_at_once) { added_ok = pam_computeacolorhash(acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->noise); if (added_ok) break; } else { const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) }; added_ok = pam_computeacolorhash(acht, rows_p, cols, 1, input_image->noise ? &input_image->noise[row * cols] : NULL); } if (!added_ok) { ignorebits++; liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", ignorebits); pam_freeacolorhash(acht); acht = NULL; break; } } } while(!acht); if (input_image->noise) { input_image->free(input_image->noise); input_image->noise = NULL; } if (input_image->free_pixels && input_image->f_pixels) { liq_image_free_rgba_source(input_image); // bow can free the RGBA source if copy has been made in f_pixels } histogram *hist = pam_acolorhashtoacolorhist(acht, input_image->gamma, options->malloc, options->free); pam_freeacolorhash(acht); if (hist) { liq_verbose_printf(options, " made histogram...%d colors found", hist->size); remove_fixed_colors_from_histogram(hist, input_image, options->target_mse); } return hist; } static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) { /* IE6 makes colors with even slightest transparency completely transparent, thus to improve situation in IE, make colors that are less than ~10% transparent completely opaque */ const float min_opaque_val = input_image->min_opaque_val; const float 
almost_opaque_val = min_opaque_val * 169.f/256.f; const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f; for(unsigned int col = 0; col < input_image->width; col++) { const rgba_pixel px = row_pixels[col]; /* ie bug: to avoid visible step caused by forced opaqueness, linearily raise opaqueness of almost-opaque colors */ if (px.a >= almost_opaque_val_int) { float al = px.a / 255.f; al = almost_opaque_val + (al-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val); al *= 256.f; row_pixels[col].a = al >= 255.f ? 255 : al; } } } /** Builds two maps: noise - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy. edges - noise map including all edges */ static void contrast_maps(liq_image *image) { const int cols = image->width, rows = image->height; if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) { return; } unsigned char *restrict noise = image->malloc(cols*rows); unsigned char *restrict edges = image->malloc(cols*rows); unsigned char *restrict tmp = image->malloc(cols*rows); if (!noise || !edges || !tmp) { return; } const f_pixel *curr_row, *prev_row, *next_row; curr_row = prev_row = next_row = liq_image_get_row_f(image, 0); for (int j=0; j < rows; j++) { prev_row = curr_row; curr_row = next_row; next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); f_pixel prev, curr = curr_row[0], next=curr; for (int i=0; i < cols; i++) { prev=curr; curr=next; next = curr_row[MIN(cols-1,i+1)]; // contrast is difference between pixels neighbouring horizontally and vertically const float a = fabsf(prev.a+next.a - curr.a*2.f), r = fabsf(prev.r+next.r - curr.r*2.f), g = fabsf(prev.g+next.g - curr.g*2.f), b = fabsf(prev.b+next.b - curr.b*2.f); const f_pixel prevl = prev_row[i]; const f_pixel nextl = next_row[i]; const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f), r1 = fabsf(prevl.r+nextl.r - curr.r*2.f), g1 = fabsf(prevl.g+nextl.g - curr.g*2.f), b1 = fabsf(prevl.b+nextl.b - 
curr.b*2.f); const float horiz = MAX(MAX(a,r),MAX(g,b)); const float vert = MAX(MAX(a1,r1),MAX(g1,b1)); const float edge = MAX(horiz,vert); float z = edge - fabsf(horiz-vert)*.5f; z = 1.f - MAX(z,MIN(horiz,vert)); z *= z; // noise is amplified z *= z; z *= 256.f; noise[j*cols+i] = z < 256 ? z : 255; z = (1.f-edge)*256.f; edges[j*cols+i] = z < 256 ? z : 255; } } // noise areas are shrunk and then expanded to remove thin edges from the map liq_max3(noise, tmp, cols, rows); liq_max3(tmp, noise, cols, rows); liq_blur(noise, tmp, noise, cols, rows, 3); liq_max3(noise, tmp, cols, rows); liq_min3(tmp, noise, cols, rows); liq_min3(noise, tmp, cols, rows); liq_min3(tmp, noise, cols, rows); liq_min3(edges, tmp, cols, rows); liq_max3(tmp, edges, cols, rows); for(int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]); image->free(tmp); image->noise = noise; image->edges = edges; } /** * Builds map of neighbor pixels mapped to the same palette entry * * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly. * Correct flood fill doesn't have visually good properties. 
*/ static void update_dither_map(unsigned char *const *const row_pointers, liq_image *input_image) { const unsigned int width = input_image->width; const unsigned int height = input_image->height; unsigned char *const edges = input_image->edges; for(unsigned int row=0; row < height; row++) { unsigned char lastpixel = row_pointers[row][0]; unsigned int lastcol=0; for(unsigned int col=1; col < width; col++) { const unsigned char px = row_pointers[row][col]; if (px != lastpixel || col == width-1) { float neighbor_count = 2.5f + col-lastcol; unsigned int i=lastcol; while(i < col) { if (row > 0) { unsigned char pixelabove = row_pointers[row-1][i]; if (pixelabove == lastpixel) neighbor_count += 1.f; } if (row < height-1) { unsigned char pixelbelow = row_pointers[row+1][i]; if (pixelbelow == lastpixel) neighbor_count += 1.f; } i++; } while(lastcol <= col) { float e = edges[row*width + lastcol] / 255.f; e *= 1.f - 2.5f/neighbor_count; edges[row*width + lastcol++] = e * 255.f; } lastpixel = px; } } } input_image->dither_map = input_image->edges; input_image->edges = NULL; } static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*)) { if (!fixed_colors_count) return palette; colormap *newpal = pam_colormap(MIN(max_colors, (palette ? 
palette->colors : 0) + fixed_colors_count), malloc, free); unsigned int i=0; if (palette && fixed_colors_count < max_colors) { unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count); for(; i < palette_max; i++) { newpal->palette[i] = palette->palette[i]; } } for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) { newpal->palette[i++] = (colormap_item){ .acolor = fixed_colors[j], .fixed = true, }; } if (palette) pam_freecolormap(palette); return newpal; } static void adjust_histogram_callback(hist_item *item, float diff) { item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff)); } /** Repeats mediancut with different histogram weights to find palette with minimum error. feedback_loop_trials controls how long the search will take. < 0 skips the iteration. */ static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p) { unsigned int max_colors = options->max_colors; // if output is posterized it doesn't make sense to aim for perfrect colors, so increase target_mse // at this point actual gamma is not set, so very conservative posterization estimate is used const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2))); int feedback_loop_trials = options->feedback_loop_trials; colormap *acolormap = NULL; double least_error = MAX_DIFF; double target_mse_overshoot = feedback_loop_trials>0 ? 
1.05 : 1.0; const double percent = (double)(feedback_loop_trials>0?feedback_loop_trials:1)/100.0; do { colormap *newmap; if (hist->size && fixed_colors_count < max_colors) { newmap = mediancut(hist, options->min_opaque_val, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(90.0/65536.0, target_mse), least_error)*1.2, options->malloc, options->free); } else { feedback_loop_trials = 0; newmap = NULL; } newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free); if (!newmap) { return NULL; } if (feedback_loop_trials <= 0) { return newmap; } // after palette has been created, total error (MSE) is calculated to keep the best palette // at the same time Voronoi iteration is done to improve the palette // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors const bool first_run_of_target_mse = !acolormap && target_mse > 0; double total_error = viter_do_iteration(hist, newmap, options->min_opaque_val, first_run_of_target_mse ? 
NULL : adjust_histogram_callback, !acolormap || options->fast_palette); // goal is to increase quality or to reduce number of colors used if quality is good enough if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) { if (acolormap) pam_freecolormap(acolormap); acolormap = newmap; if (total_error < target_mse && total_error > 0) { // voronoi iteration improves quality above what mediancut aims for // this compensates for it, making mediancut aim for worse target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error); } least_error = total_error; // if number of colors could be reduced, try to keep it that way // but allow extra color as a bit of wiggle room in case quality can be improved too max_colors = MIN(newmap->colors+1, max_colors); feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever } else { for(unsigned int j=0; j < hist->size; j++) { hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0; } target_mse_overshoot = 1.0; feedback_loop_trials -= 6; // if error is really bad, it's unlikely to improve, so end sooner if (total_error > least_error*4) feedback_loop_trials -= 3; pam_freecolormap(newmap); } liq_verbose_printf(options, " selecting colors...%d%%",100-MAX(0,(int)(feedback_loop_trials/percent))); } while(feedback_loop_trials > 0); *palette_error_p = least_error; return acolormap; } static liq_result *pngquant_quantize(histogram *hist, const liq_attr *options, const liq_image *img) { colormap *acolormap; double palette_error = -1; // no point having perfect match with imperfect colors (ignorebits > 0) const bool fast_palette = options->fast_palette || hist->ignorebits > 0; const bool few_input_colors = hist->size+img->fixed_colors_count <= options->max_colors; // If image has few colors to begin with (and no quality degradation is required) // then it's possible to skip quantization entirely if 
(few_input_colors && options->target_mse == 0) { acolormap = pam_colormap(hist->size, options->malloc, options->free); for(unsigned int i=0; i < hist->size; i++) { acolormap->palette[i].acolor = hist->achv[i].acolor; acolormap->palette[i].popularity = hist->achv[i].perceptual_weight; } acolormap = add_fixed_colors_to_palette(acolormap, options->max_colors, img->fixed_colors, img->fixed_colors_count, options->malloc, options->free); palette_error = 0; } else { const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain acolormap = find_best_palette(hist, options, max_mse, img->fixed_colors, img->fixed_colors_count, &palette_error); if (!acolormap) { return NULL; } // Voronoi iteration approaches local minimum for the palette const double iteration_limit = options->voronoi_iteration_limit; unsigned int iterations = options->voronoi_iterations; if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work if (iterations) { // likely_colormap_index (used and set in viter_do_iteration) can't point to index outside colormap if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) { if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) { hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway } } verbose_print(options, " moving colormap towards local minimum"); double previous_palette_error = MAX_DIFF; for(unsigned int i=0; i < iterations; i++) { palette_error = viter_do_iteration(hist, acolormap, options->min_opaque_val, NULL, i==0 || options->fast_palette); if (fabs(previous_palette_error-palette_error) < iteration_limit) { break; } if (palette_error > max_mse*1.5) { // probably hopeless if (palette_error > max_mse*3.0) break; // definitely hopeless i++; } 
                previous_palette_error = palette_error;
            }
        }

        /* Enforce the caller's quality floor: if even the refined palette
           exceeds the MSE budget, report failure rather than a bad image. */
        if (palette_error > max_mse) {
            liq_verbose_printf(options, "  image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
                               palette_error*65536.0/6.0, mse_to_quality(palette_error),
                               max_mse*65536.0/6.0, mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return NULL;
        }
    }

    sort_palette(acolormap, options);

    /* Package the palette and bookkeeping into the public result object.
       Ownership of acolormap transfers to the result (freed with it). */
    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) return NULL;
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .fast_palette = fast_palette,
        .use_dither_map = options->use_dither_map,
        .gamma = img->gamma,
        .min_posterization_output = options->min_posterization_output,
    };
    return result;
}

/*
 * Convenience wrapper around liq_write_remapped_image_rows(): validates the
 * caller-supplied flat buffer, builds a row-pointer table into it (one byte
 * per pixel, rows laid out contiguously), and delegates.  Returns
 * LIQ_BUFFER_TOO_SMALL if buffer_size < width*height.
 */
LIQ_EXPORT liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    /* One palette index (byte) per pixel. */
    const size_t required_size = input_image->width * input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    /* VLA of row pointers aliasing the caller's buffer; rows are contiguous. */
    unsigned char *rows[input_image->height];
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}

/*
 * Remaps input_image onto the quantized palette in `quant`, writing one
 * palette index per pixel through row_pointers.  Chooses plain nearest-color
 * remapping or Floyd-Steinberg dithering based on the result's dither level.
 * Replaces any previous remapping result attached to `quant`.
 */
LIQ_EXPORT liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    /* Only one remapping result is kept per liq_result; drop the old one. */
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }

    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    /* Edge/dither maps are computed lazily, only when dithering needs them. */
    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    /*
     ** Step 4: map the colors in the image to their closest match in the
     ** new colormap, and write 'em out.
     */

    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette, quant->fast_palette);
    } else {
        const bool generate_dither_map = result->use_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette, quant->fast_palette);
            update_dither_map(row_pointers, input_image);
        }

        // remapping above was the last chance to do voronoi iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);

        remap_to_palette_floyd(input_image, row_pointers, result->palette,
                               MAX(remapping_error*2.4, 16.f/256.f),
                               result->use_dither_map, generate_dither_map, result->dither_level);
    }

    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }

    return LIQ_OK;
}

/* Returns the compile-time library version as an integer (LIQ_VERSION). */
LIQ_EXPORT int liq_version() {
    return LIQ_VERSION;
}
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 4296.0 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo **primitive_info; size_t *extent; ssize_t offset; PointInfo point; ExceptionInfo *exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static Image *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *), RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *), TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(MVGInfo *,const size_t), TraceCircle(MVGInfo *,const PointInfo,const PointInfo), TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *); static ssize_t TracePath(MVGInfo *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
%  AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* AcquireCriticalMemory() aborts on allocation failure, so draw_info is
     always valid here; GetDrawInfo() fills in the defaults. */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);  /* nothing to clone: return defaults */
  exception=AcquireExceptionInfo();
  /*
    Deep-copy every owned string/image/array; plain members are assigned.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      ssize_t
        x;

      /* dash_pattern is terminated by a (near-)zero entry; count entries. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      /* (2*x+2) slots are allocated and zeroed but only the (x+1) source
         entries (including the terminator) are copied. */
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): message tag appears copy-pasted from the
           dash-pattern branch above -- confirm against the message table
           before changing it. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPathToPolygon() returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o draw_info: Specifies a pointer to an DrawInfo structure.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

/* Free a PolygonInfo, its edge table, and each edge's point array.
   Safe to call on a partially-constructed PolygonInfo; returns NULL. */
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  ssize_t
    i;

  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        if (polygon_info->edges[i].points != (PointInfo *) NULL)
          polygon_info->edges[i].points=(PointInfo *)
            RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* qsort() comparator: orders edges by first point (y, then x), then by
   slope cross-product, then by second point -- the order the scanline
   rasterizer expects. */
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/* Dump every edge of a PolygonInfo to the draw event log. */
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  EdgeInfo
    *p;

  ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"        %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end active-edge");
}

/* In-place reversal of a point array (used to normalize edge direction). */
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form:  split the
    path into y-monotonic edges, normalize each edge to run downward, and
    sort the edge table for the scanline rasterizer.  All growable arrays
    double on overflow; on any allocation failure everything acquired so
    far is released via DestroyPolygonInfo().
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to:  flush the edge in progress (if any), then start a new
          point buffer at this location.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge:  the y-direction flipped, so close the current edge and
          begin a new one starting at the previous point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        points=(PointInfo *) NULL;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /*
        Flush the trailing edge; a single dangling point is discarded.
      */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  polygon_info->number_edges=edge;
  /* Shrink the edge table and each point array to their exact sizes. */
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPrimitiveToPath() returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/

/* Dump a vector path to the draw event log, one operation per line. */
static void LogPathInfo(const PathInfo *path_info)
{
  const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,
    q;

  ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
    Non-path primitives (alpha, color, image, point, text) have no vector
    form and yield NULL.  Each open subpath is closed with a transparent
    "ghostline" back to its start point.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  /* Worst case: every input point plus ghostline closures (3 per point). */
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /*
    Release every owned string/image/array, then the structure itself.
  */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Invalidate the signature so stale pointers are caught by asserts. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AffineEdge() clips the horizontal span 'edge' of scanline 'y' against the
  affine-mapped bounds of 'image'.  The returned segment signals an empty
  span with x2 < x1 when the scanline maps entirely outside the image
  columns.
  NOTE(review): the out-of-range *rows* branch returns x2=edge->x2 while the
  columns branch returns x2=edge->x1 (empty); the asymmetry looks deliberate
  but should be confirmed against upstream history.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double intercept, z;
  double x;
  SegmentInfo inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: column fixed by translation alone; reject if off-image. */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  InverseAffineMatrix() returns the inverse of the 2x3 affine transform.
  PerceptibleReciprocal() guards the determinant against a (near-)singular
  matrix instead of dividing by zero.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix inverse_affine;
  double determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix inverse_affine;
  CacheView *image_view, *source_view;
  MagickBooleanType status;
  PixelInfo zero;
  PointInfo extent[4], min, max;
  ssize_t i;
  SegmentInfo edge;
  ssize_t start, stop, y;

  /*
    Determine bounding box: map the four source corners forward through the
    affine and take the min/max to bound the destination region.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: iterate destination scanlines, mapping each
    destination pixel back through the inverse affine and compositing the
    interpolated source pixel over the destination.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo composite, pixel;
    PointInfo point;
    ssize_t x;
    Quantum *magick_restrict q;
    SegmentInfo inverse_edge;
    ssize_t x_offset;

    /* Cooperative cancellation: a failed row flags all remaining rows. */
    if (status == MagickFalse)
      continue;
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    /* NOTE(review): x_offset is incremented but never read — dead state. */
    x_offset=0;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5)); x <=
         CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   B o u n d i n g   R e c t a n g l e s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double mid;
  DrawInfo *clone_info;
  MagickStatusType status;
  PointInfo end, resolution, start;
  PrimitiveInfo primitive_info[6];
  ssize_t i;
  SegmentInfo bounds;
  ssize_t coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* Default to 96 DPI; override from the draw density geometry if given. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo geometry_info;
      MagickStatusType flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        resolution.x=geometry_info.rho;
      resolution.y=resolution.x;
      if ((flags & SigmaValue) != 0)
        resolution.y=geometry_info.sigma;
    }
  /* Half the stroke width, scaled by resolution and the current affine. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, padded by mid and clamped to the image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* Per-edge rectangles: red for one direction, green for the other. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,
            &clone_info->stroke,exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,
            &clone_info->stroke,exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* Overall bounding rectangle in blue. */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C l i p   P a t h                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char *clip_path;
  Image *clipping_mask;
  MagickBooleanType status;

  /* The MVG clip path is stored as an image artifact keyed by 'id'. */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C l i p p i n g   M a s k                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DrawClippingMask() renders 'clip_path' (MVG) into a fresh grayscale mask
  image the size of 'image'.  Returns NULL on failure; on failure paths the
  partially built mask is destroyed before returning.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo *clone_info;
  Image *clip_mask, *separate_mask;
  MagickStatusType status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  /* Fully transparent background: only the painted path becomes opaque. */
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* Render the clip path with white fill / transparent hairline stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Extract the alpha channel as the mask, inverted for WritePixelMask. */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C o m p o s i t e   M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DrawCompositeMask() mirrors DrawClippingMask() but builds a
  CompositePixelMask from 'mask_path' instead of a write mask.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image *composite_mask, *separate_mask;
  DrawInfo *clone_info;
  MagickStatusType status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   D a s h
  P o l y g o n                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double length, maximum_length, offset, scale, total_length;
  DrawInfo *clone_info;
  MagickStatusType status;
  PrimitiveInfo *dash_polygon;
  double dx, dy;
  ssize_t i;
  size_t number_vertices;
  ssize_t j, n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /* Count vertices up to the UndefinedPrimitive sentinel. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /*
    Scratch polygon capacity is 2*number_vertices+32; the inner loop guards
    writes against number_vertices (see the (j+1) check below).
  */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  /*
    Consume the dash offset: advance 'n' through the pattern until the
    offset is exhausted; 'n' odd means we start inside a gap.
  */
  j=1;
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /* Walk each polygon segment, emitting a stroke per dash-pattern "on" run. */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    /* Skip degenerate/huge segments to bound work and memory. */
    if (maximum_length > (double) (MaxBezierCoordinates >> 2))
      continue;
    if (fabs(length) < MagickEpsilon)
      {
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
         (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* Odd index: gap — restart the working dash at this point. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* Even index: dash — close the working dash and stroke it. */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unconsumed remainder of this segment into the next. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* Flush a trailing partial dash, nudged by epsilon so it rasterizes. */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   G r a d i e n t   I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetStopColorOffset() returns the gradient parameter for pixel (x,y):
  for linear gradients, the (unnormalized) projection of (x,y) onto the
  gradient vector; for radial gradients, the (possibly angle-scaled)
  distance from the gradient center.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double gamma, length, offset, scale;
      PointInfo p, q;
      const SegmentInfo *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /* gamma guards against a zero-length gradient vector or point. */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo v;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread: raw Euclidean distance from the center. */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* Otherwise rotate by the gradient angle and scale by the radii. */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  StopInfoCompare(): qsort comparator ordering gradient stops by ascending
  offset; offsets within MagickEpsilon compare equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo *stop_1, *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView *image_view;
  const GradientInfo *gradient;
  const SegmentInfo *gradient_vector;
  double length;
  MagickBooleanType status;
  PixelInfo zero;
  PointInfo point;
  RectangleInfo bounding_box;
  ssize_t y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* Stops must be sorted by offset for the interpolation search below. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double alpha, offset;
    PixelInfo composite, pixel;
    Quantum *magick_restrict q;
    ssize_t i, x;
    ssize_t j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the offset for x==x1,y==y1 where the per-pixel branch is skipped. */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Pad: clamp offsets outside [0,1] to the first/last stop. */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Reflect: mirror the offset into [0,1] on alternating periods. */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double repeat;
          MagickBooleanType antialias;

          /* Repeat: wrap the offset each period; antialias near the seam. */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat*PerceptibleReciprocal(gradient->radius);
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* Blend first and last stop across the seam pixel. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
% */ static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info, const double pad) { double extent; size_t quantum; /* Check if there is enough storage for drawing pimitives. */ quantum=sizeof(**mvg_info->primitive_info); extent=(double) mvg_info->offset+pad+(PrimitiveExtentPad+1)*quantum; if (extent <= (double) *mvg_info->extent) return(MagickTrue); if (extent == (double) CastDoubleToLong(extent)) { *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory( *mvg_info->primitive_info,(size_t) (extent+1),quantum); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) { ssize_t i; *mvg_info->extent=(size_t) extent; for (i=mvg_info->offset+1; i <= (ssize_t) extent; i++) { (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive; (*mvg_info->primitive_info)[i].text=(char *) NULL; } return(MagickTrue); } } /* Reallocation failed, allocate a primitive to facilitate unwinding. */ (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t) ( (PrimitiveExtentPad+1)*quantum)); (void) memset(*mvg_info->primitive_info,0,(size_t) ((PrimitiveExtentPad+1)* quantum)); *mvg_info->extent=1; mvg_info->offset=0; return(MagickFalse); } static inline double GetDrawValue(const char *magick_restrict string, char **magick_restrict sentinal) { char **magick_restrict q; double value; q=sentinal; value=InterpretLocaleValue(string,q); sentinal=q; return(value); } static int MVGMacroCompare(const void *target,const void *source) { const char *p, *q; p=(const char *) target; q=(const char *) source; return(strcmp(p,q)); } static SplayTreeInfo *GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo *macros; /* Scan graphic primitives for 
definitions and classes. */ if (primitive == (const char *) NULL) return((SplayTreeInfo *) NULL); macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory, RelinquishMagickMemory); macro=AcquireString(primitive); token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; for (q=primitive; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare("push",token) == 0) { const char *end, *start; (void) GetNextToken(q,&q,extent,token); if (*q == '"') { char name[MagickPathExtent]; const char *p; ssize_t n; /* Named macro (e.g. push graphic-context "wheel"). */ (void) GetNextToken(q,&q,extent,token); start=q; end=q; (void) CopyMagickString(name,token,MagickPathExtent); n=1; for (p=q; *p != '\0'; ) { if (GetNextToken(p,&p,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare(token,"pop") == 0) { end=p-strlen(token)-1; n--; } if (LocaleCompare(token,"push") == 0) n++; if ((n == 0) && (end > start)) { /* Extract macro. */ (void) GetNextToken(p,&p,extent,token); (void) CopyMagickString(macro,start,(size_t) (end-start)); (void) AddValueToSplayTree(macros,ConstantString(name), ConstantString(macro)); break; } } } } } token=DestroyString(token); macro=DestroyString(macro); return(macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=GetDrawValue(point,&p); return((fabs(value) < MagickEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; const char *p; ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); } if 
((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=(size_t) PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (number_points+1),sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) (number_points+1)* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { 
(void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((graphic_context[n]->render != MagickFalse) && (mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } if (LocaleCompare("currentColor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if 
(LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if 
(LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); 
graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo region; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); region.x=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.y=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) 
ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.width=(size_t) CastDoubleToLong(floor(GetDrawValue( token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) region.width,(double) region.height,(double) region.x,(double) region.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops 
== (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { 
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; 
break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) 
|| (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(BezierQuantum*(double) primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) 
ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case 
PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine( &graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,(double) graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /* Release text payloads attached to text/image primitives before
         freeing the primitive array itself. */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  if (stops != (StopInfo *) NULL)
    stops=(StopInfo *) RelinquishMagickMemory(stops);
  /* Unwind the graphic-context stack, including any unbalanced pushes. */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
      keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /* Public entry point: render draw_info->primitive (MVG) at recursion
     depth 0. */
  return(RenderMVGContent(image,draw_info,0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: the rendered pattern image (replaced on success).
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /* The pattern's MVG body and geometry are stored as image artifacts keyed
     by the pattern name; both must exist. */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  /* Start from a fully transparent canvas so only the rendered pattern
     contributes when tiled. */
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* Drop inherited pattern images to avoid recursive pattern fills. */
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w P o l y g o n P r i m i t i v e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyPolygonThreadSet() releases each thread's PolygonInfo and the
  thread-set array itself; always returns NULL for pointer poisoning.
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}

/*
  AcquirePolygonThreadSet() builds one PolygonInfo per rendering thread so
  each thread can mutate its own edge/scanline state independently.
*/
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  /* Zero all slots so a partial failure can be unwound safely. */
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info,exception); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); polygon_info[0]=ConvertPathToPolygon(path_info,exception); if (polygon_info[0] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } for (i=1; i < (ssize_t) number_threads; i++) { EdgeInfo *edge_info; ssize_t j; polygon_info[i]=(PolygonInfo *) AcquireMagickMemory( sizeof(*polygon_info[i])); if (polygon_info[i] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } polygon_info[i]->number_edges=0; edge_info=polygon_info[0]->edges; polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory( polygon_info[0]->number_edges,sizeof(*edge_info)); if (polygon_info[i]->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges,edge_info, polygon_info[0]->number_edges*sizeof(*edge_info)); for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) polygon_info[i]->edges[j].points=(PointInfo *) NULL; polygon_info[i]->number_edges=polygon_info[0]->number_edges; for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) { edge_info=polygon_info[0]->edges+j; polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory( edge_info->number_points,sizeof(*edge_info)); if (polygon_info[i]->edges[j].points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points, edge_info->number_points*sizeof(*edge_info->points)); } 
} path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge) { assert(edge < (ssize_t) polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < (ssize_t) polygon_info->number_edges) (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; const PointInfo *q; EdgeInfo *p; ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. 
*/ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon; distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. */ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 
1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; EdgeInfo *p; ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(primitive_info,exception); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
  if (IsStringTrue(artifact) != MagickFalse)
    (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
  /* Union of all edge bounding boxes, grown by half the stroke width. */
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  /* Entirely off-canvas: nothing to render. */
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* Clamp bounds to the image. */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
      stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
        stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* Only the single pixel at the primitive's point is painted. */
          if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) &&
              (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5))))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
  stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
    stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-
      start_x+1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.  Each thread uses its own polygon_info copy.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Hard threshold when antialiasing is disabled. */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  LogPrimitiveInfo() writes a human-readable trace of a primitive to the
  debug log (debugging aid only; no rendering side effects).
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    point,
    q;

  ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  /* Simple primitives log one line and return. */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /* Path-like primitives: walk the coordinate list, one subpath at a time. */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* Start of a new subpath. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    point=primitive_info[i].point;
    /* q is the previous point; flag exact repeats as duplicates. */
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath complete: "last" if it returned to its start, else "open". */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void)
LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
      (double) coordinates);
  }
}

/*
  DrawPrimitive() renders a single graphic primitive onto the image as
  directed by draw_info: point/color/alpha paint methods, an inline or
  on-disk image composite, annotated text, or (default case) a polygon-based
  shape drawn with optional dashes and strokes.

    o image: the image to draw on.
    o draw_info: rendering attributes (fill, stroke, affine, compose, ...).
    o primitive_info: the primitive to render; for polygon shapes this is a
      coordinate list terminated by UndefinedPrimitive.
    o exception: return any errors or warnings in this structure.

  Returns MagickTrue when every sub-operation succeeded.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  /*
    A non-gray fill or stroke on a grayscale image forces sRGB first.
  */
  status=MagickTrue;
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /* Primitive origin, rounded to the nearest pixel center. */
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* Set the alpha of the single pixel at (x,y). */
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /*
            Set the alpha of every pixel fuzzily matching the pixel at (x,y).
            Note: x and y are reused as loop indices from here on.
          */
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /*
            Flood-fill alpha from (x,y); FillToBorderMethod fills until the
            border color is reached instead of matching the seed color.
          */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* Restrict the flood-fill to the alpha channel only. */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /* Set the alpha of every pixel in the image. */
          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /* Same paint methods as AlphaPrimitive, but writing the full color. */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /*
        Composite an image named by primitive_info->text ("data:" inline
        images are decoded, otherwise the text is treated as a filename or
        URL), resized to the primitive's second point if one was given.
      */
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            status&=SetImageInfo(clone_info,0,exception);
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            if (clone_info->size != (char *) NULL)
              clone_info->size=DestroyString(clone_info->size);
            if (clone_info->extract != (char *) NULL)
              clone_info->extract=DestroyString(clone_info->extract);
            /* Only read from coders/paths that are safe to dereference. */
            if ((LocaleCompare(clone_info->magick,"file") == 0) ||
                (LocaleCompare(clone_info->magick,"https") == 0) ||
                (LocaleCompare(clone_info->magick,"http") == 0) ||
                (LocaleCompare(clone_info->magick,"mpri") == 0) ||
                (IsPathAccessible(clone_info->filename) != MagickFalse))
              composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      /* Composite the fill color over the single pixel at (x,y). */
      PixelInfo
        fill_color;

      Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /* Delegate text rendering to AnnotateImage() at the primitive point. */
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /* Polygon-based primitives: fill first, then dashes or strokes. */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            point_x,
            point_y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          point_x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          point_y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((point_x < MagickEpsilon) && (point_y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,
              exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w S t r o k e P o l y g o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
%  the image while respecting the line cap and join attributes.
%
%  The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/

/*
  DrawRoundLinecap() paints a round line cap at the point carried by
  primitive_info.  It builds a degenerate 4-point polygon (each corner
  offset by 2*MagickEpsilon) terminated by UndefinedPrimitive and hands it
  to DrawPolygonPrimitive(); only linecap[4].primitive is set on the
  terminator entry, which is the only field the renderer reads there.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  ssize_t
    i;

  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

/*
  DrawStrokePolygon() strokes each subpath of primitive_info by tracing its
  stroke outline and filling that outline with the stroke color/pattern;
  open subpaths additionally get round line caps when requested.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /* Fill the traced outline with the stroke color; disable its own stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive;
       p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q points at the last coordinate of this subpath. */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* Zero everything, then set the diagonal scale terms to 1 (identity). */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: opaque black fill, fully transparent stroke. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* Inherit selected attributes from the (cloned) image info. */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* Image options override the defaults set above. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* Accept either a named weight or a bare numeric value. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n:
%
%    o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  ssize_t
    i;

  /*
    r = (prod of i for i in k+1..n) / (prod of i for i in 1..n-k)
      = n!/(k!*(n-k)!), i.e. the binomial coefficient C(n,k).
  */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
% */ static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo degrees) { PointInfo center, radius; center.x=0.5*(end.x+start.x); center.y=0.5*(end.y+start.y); radius.x=fabs(center.x-start.x); radius.y=fabs(center.y-start.y); return(TraceEllipse(mvg_info,center,radius,degrees)); } static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo arc,const double angle, const MagickBooleanType large_arc,const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; MagickStatusType status; PointInfo center, points[3], radii; double cosine, sine; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i; size_t arc_segments; ssize_t offset; offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) return(TracePoint(primitive_info,end)); radii.x=fabs(arc.x); radii.y=fabs(arc.y); if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon)) return(TraceLine(primitive_info,start,end)); cosine=cos(DegreesToRadians(fmod((double) angle,360.0))); sine=sin(DegreesToRadians(fmod((double) angle,360.0))); center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2); center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2); delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/ (radii.y*radii.y); if (delta < MagickEpsilon) return(TraceLine(primitive_info,start,end)); if (delta > 1.0) { radii.x*=sqrt((double) delta); radii.y*=sqrt((double) delta); } points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x); points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y); points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x); points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y); alpha=points[1].x-points[0].x; beta=points[1].y-points[0].y; if 
(fabs(alpha*alpha+beta*beta) < MagickEpsilon) return(TraceLine(primitive_info,start,end)); factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5* MagickPI+MagickEpsilon))))); status=MagickTrue; p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == 
primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == 0) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } if (status == 0) return(MagickFalse); mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) MAGICK_SSIZE_MAX) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) MAGICK_SSIZE_MAX) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (alpha > (double) quantum) quantum=(size_t) alpha; } } primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=MagickMin(quantum/number_coordinates,BezierQuantum); coefficients=(double *) AcquireQuantumMemory(number_coordinates, sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates* sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) { if (points != (PointInfo *) NULL) points=(PointInfo *) RelinquishMagickMemory(points); if (coefficients != (double *) NULL) coefficients=(double *) RelinquishMagickMemory(coefficients); (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } control_points=quantum*number_coordinates; if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; /* Compute bezier points. 
*/ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. */ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { if (TracePoint(p,points[i]) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; } if (TracePoint(p,end) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; return(TraceEllipse(mvg_info,start,offset,degrees)); } static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center, const PointInfo radii,const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo 
*primitive_info; PrimitiveInfo *p; ssize_t i; /* Ellipses are just short segmented polys. */ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(MagickTrue); delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y)); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0); angle.x=DegreesToRadians(arc.x); y=arc.y; while (y < arc.x) y+=360.0; angle.y=DegreesToRadians(y); coordinates=ceil((angle.y-angle.x)/step+1.0); if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; x=fabs(primitive_info[0].point.x- primitive_info[primitive_info->coordinates-1].point.x); y=fabs(primitive_info[0].point.y- primitive_info[primitive_info->coordinates-1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { if (TracePoint(primitive_info,start) == MagickFalse) return(MagickFalse); if ((fabs(start.x-end.x) < MagickEpsilon) && 
(fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return(MagickTrue); } if (TracePoint(primitive_info+1,end) == MagickFalse) return(MagickFalse); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; primitive_info->closed_subpath=MagickFalse; return(MagickTrue); } static ssize_t TracePath(MVGInfo *mvg_info,const char *path, ExceptionInfo *exception) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = {0.0, 0.0}, points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; PrimitiveInfo *q; ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; status=MagickTrue; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { if (status == MagickFalse) break; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo arc = {0.0, 0.0}; /* Elliptical arc. 
*/ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? 
y : point.y+y); if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. */ do { points[0]=point; for (i=1; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'H' ? 
x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. 
*/ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? 
y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. 
*/ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
*/ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token,exception); break; } } } if (status == MagickFalse) return(-1); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; PrimitiveInfo *p; ssize_t i; p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo end,PointInfo arc) { PointInfo 
degrees, point, segment; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i; ssize_t offset; offset=mvg_info->offset; segment.x=fabs(end.x-start.x); segment.y=fabs(end.y-start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0; return(MagickTrue); } if (arc.x > (0.5*segment.x)) arc.x=0.5*segment.x; if (arc.y > (0.5*segment.y)) arc.y=0.5*segment.y; point.x=start.x+segment.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+segment.x-arc.x; point.y=start.y+segment.y-arc.y; degrees.x=0.0; degrees.y=90.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+segment.y-arc.y; degrees.x=90.0; degrees.y=180.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse) return(MagickFalse); p+=p->coordinates; mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } 
return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; double dx, dy; ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((pad_p) > MaxBezierCoordinates) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } \ if ((pad_q) > MaxBezierCoordinates) \ stroke_q=(PointInfo 
*) RelinquishMagickMemory(stroke_q); \ else \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ (void) ThrowMagickException(exception,GetMagickModule(), \ ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. 
*/ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x; offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y; closed_path=(fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,MaxStrokePad); RestoreMSCWarning dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: 
{ stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta. 
q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid)))))); DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); RestoreMSCWarning stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p- theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid))))))); DisableMSCWarning(4127) CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); RestoreMSCWarning stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) 
(j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
/* ==== Mat_dh.c ==== */
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.9 $
 ***********************************************************************EHEADER*/

#include "Mat_dh.h"
#include "getRow_dh.h"
#include "SubdomainGraph_dh.h"
#include "TimeLog_dh.h"
#include "Mem_dh.h"
#include "Numbering_dh.h"
#include "Parser_dh.h"
#include "mat_dh_private.h"
#include "io_dh.h"
#include "Hash_i_dh.h"

/* Forward declarations for the matvec communication setup helpers
   defined later in this file. */
static void setup_matvec_sends_private(Mat_dh mat, int *inlist);
static void setup_matvec_receives_private(Mat_dh mat, int *beg_rows,
                                          int *end_rows, int reqlen,
                                          int *reqind, int *outlist);

#if 0
partial (?) implementation below; not used anyplace, I think; for future expansion? [mar 21, 2K+1]
static void Mat_dhAllocate_getRow_private(Mat_dh A);
#endif

static bool commsOnly = false; /* experimental, for matvec functions */

#undef __FUNC__
#define __FUNC__ "Mat_dhCreate"
/* Allocate a Mat_dh object and initialize every field to its empty/default
   state; no matrix storage is allocated here.  Also latches the
   "-commsOnly" and "-debug_Mat" command-line switches. */
void Mat_dhCreate(Mat_dh *mat)
{
  START_FUNC_DH
  struct _mat_dh* tmp = (struct _mat_dh*)MALLOC_DH(sizeof(struct _mat_dh)); CHECK_V_ERROR;
  *mat = tmp;

  commsOnly = Parser_dhHasSwitch(parser_dh, "-commsOnly");
  if (myid_dh == 0 && commsOnly == true) {
    /* printf("\n@@@ commsOnly == true for matvecs! @@@\n"); */
    fflush(stdout);
  }

  /* Matrix shape and CSR storage (unallocated until setup). */
  tmp->m = 0;
  tmp->n = 0;
  tmp->beg_row = 0;
  tmp->bs = 1;
  tmp->rp = NULL;
  tmp->len = NULL;
  tmp->cval = NULL;
  tmp->aval = NULL;
  tmp->diag = NULL;
  tmp->fill = NULL;
  tmp->owner = true;

  /* getRow scratch state. */
  tmp->len_private = 0;
  tmp->rowCheckedOut = -1;
  tmp->cval_private = NULL;
  tmp->aval_private = NULL;
  tmp->row_perm = NULL;

  /* Matvec communication state (filled in by Mat_dhMatVecSetup). */
  tmp->num_recv = 0;
  tmp->num_send = 0;
  tmp->recv_req = NULL;
  tmp->send_req = NULL;
  tmp->status = NULL;
  tmp->recvbuf = NULL;
  tmp->sendbuf = NULL;
  tmp->sendind = NULL;
  tmp->sendlen = 0;
  tmp->recvlen = 0;
  tmp->numb = NULL;
  tmp->matvecIsSetup = false;

  Mat_dhZeroTiming(tmp); CHECK_V_ERROR;
  tmp->matvec_timing = true;

  tmp->debug = Parser_dhHasSwitch(parser_dh, "-debug_Mat");
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "Mat_dhDestroy"
/* Release a Mat_dh object: owned matrix storage (only when mat->owner is
   true), persistent MPI requests, communication buffers, and matvec
   setup/numbering state, then the struct itself. */
void Mat_dhDestroy(Mat_dh mat)
{
  START_FUNC_DH
  int i;

  if (mat->owner) {
    if (mat->rp != NULL) { FREE_DH(mat->rp); CHECK_V_ERROR; }
    if (mat->len != NULL) { FREE_DH(mat->len); CHECK_V_ERROR; }
    if (mat->cval != NULL) { FREE_DH(mat->cval); CHECK_V_ERROR; }
    if (mat->aval != NULL) { FREE_DH(mat->aval); CHECK_V_ERROR; }
    if (mat->diag != NULL) { FREE_DH(mat->diag); CHECK_V_ERROR; }
    if (mat->fill != NULL) { FREE_DH(mat->fill); CHECK_V_ERROR; }
    if (mat->cval_private != NULL) { FREE_DH(mat->cval_private); CHECK_V_ERROR; }
    if (mat->aval_private != NULL) { FREE_DH(mat->aval_private); CHECK_V_ERROR; }
    if (mat->row_perm != NULL) { FREE_DH(mat->row_perm); CHECK_V_ERROR; }
  }

  /* Free the persistent MPI requests created during matvec setup. */
  for (i=0; i<mat->num_recv; i++) MPI_Request_free(&mat->recv_req[i]);
  for (i=0; i<mat->num_send; i++) MPI_Request_free(&mat->send_req[i]);
  if (mat->recv_req != NULL) { FREE_DH(mat->recv_req); CHECK_V_ERROR; }
  if (mat->send_req != NULL) { FREE_DH(mat->send_req); CHECK_V_ERROR; }
  if (mat->status != NULL) { FREE_DH(mat->status); CHECK_V_ERROR; }
  if (mat->recvbuf != NULL) { FREE_DH(mat->recvbuf); CHECK_V_ERROR; }
  if (mat->sendbuf != NULL) { FREE_DH(mat->sendbuf); CHECK_V_ERROR; }
  if (mat->sendind != NULL) { FREE_DH(mat->sendind); CHECK_V_ERROR; }
  if
(mat->matvecIsSetup) { Mat_dhMatVecSetdown(mat); CHECK_V_ERROR; } if (mat->numb != NULL) { Numbering_dhDestroy(mat->numb); CHECK_V_ERROR; } FREE_DH(mat); CHECK_V_ERROR; END_FUNC_DH } /* this should put the cval array back the way it was! */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVecSetDown" void Mat_dhMatVecSetdown(Mat_dh mat) { START_FUNC_DH if (ignoreMe) SET_V_ERROR("not implemented"); END_FUNC_DH } /* adopted from Edmond Chow's ParaSails */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVecSetup" void Mat_dhMatVecSetup(Mat_dh mat) { START_FUNC_DH if (np_dh == 1) { goto DO_NOTHING; } else { int *outlist, *inlist; int ierr, i, row, *rp = mat->rp, *cval = mat->cval; Numbering_dh numb; int m = mat->m; int firstLocal = mat->beg_row; int lastLocal = firstLocal+m; int *beg_rows, *end_rows; mat->recv_req = (MPI_Request *)MALLOC_DH(np_dh * sizeof(MPI_Request)); CHECK_V_ERROR; mat->send_req = (MPI_Request *)MALLOC_DH(np_dh * sizeof(MPI_Request)); CHECK_V_ERROR; mat->status = (MPI_Status *)MALLOC_DH(np_dh * sizeof(MPI_Status)); CHECK_V_ERROR; beg_rows = (int*)MALLOC_DH(np_dh*sizeof(int)); CHECK_V_ERROR; end_rows = (int*)MALLOC_DH(np_dh*sizeof(int)); CHECK_V_ERROR; if (np_dh == 1) { /* this is for debugging purposes in some of the drivers */ beg_rows[0] = 0; end_rows[0] = m; } else { ierr = MPI_Allgather(&firstLocal, 1, MPI_INT, beg_rows, 1, MPI_INT, comm_dh); CHECK_MPI_V_ERROR(ierr); ierr = MPI_Allgather(&lastLocal, 1, MPI_INT, end_rows, 1, MPI_INT, comm_dh); CHECK_MPI_V_ERROR(ierr); } outlist = (int *)MALLOC_DH(np_dh*sizeof(int)); CHECK_V_ERROR; inlist = (int *)MALLOC_DH(np_dh*sizeof(int)); CHECK_V_ERROR; for (i=0; i<np_dh; ++i) { outlist[i] = 0; inlist[i] = 0; } /* Create Numbering object */ Numbering_dhCreate(&(mat->numb)); CHECK_V_ERROR; numb = mat->numb; Numbering_dhSetup(numb, mat); CHECK_V_ERROR; setup_matvec_receives_private(mat, beg_rows, end_rows, numb->num_ext, numb->idx_ext, outlist); CHECK_V_ERROR; if (np_dh == 1) { /* this is for debugging purposes in some of 
the drivers */ inlist[0] = outlist[0]; } else { ierr = MPI_Alltoall(outlist, 1, MPI_INT, inlist, 1, MPI_INT, comm_dh); CHECK_MPI_V_ERROR(ierr); } setup_matvec_sends_private(mat, inlist); CHECK_V_ERROR; /* Convert to local indices */ for (row=0; row<m; row++) { int len = rp[row+1]-rp[row]; int *ind = cval+rp[row]; Numbering_dhGlobalToLocal(numb, len, ind, ind); CHECK_V_ERROR; } FREE_DH(outlist); CHECK_V_ERROR; FREE_DH(inlist); CHECK_V_ERROR; FREE_DH(beg_rows); CHECK_V_ERROR; FREE_DH(end_rows); CHECK_V_ERROR; } DO_NOTHING: ; END_FUNC_DH } /* adopted from Edmond Chow's ParaSails */ #undef __FUNC__ #define __FUNC__ "setup_matvec_receives_private" void setup_matvec_receives_private(Mat_dh mat, int *beg_rows, int *end_rows, int reqlen, int *reqind, int *outlist) { START_FUNC_DH int ierr, i, j, this_pe; MPI_Request request; int m = mat->m; mat->num_recv = 0; /* Allocate recvbuf */ /* recvbuf has numlocal entries saved for local part of x, used in matvec */ mat->recvbuf = (double*)MALLOC_DH((reqlen+m) * sizeof(double)); for (i=0; i<reqlen; i=j) { /* j is set below */ /* The processor that owns the row with index reqind[i] */ this_pe = mat_find_owner(beg_rows, end_rows, reqind[i]); CHECK_V_ERROR; /* Figure out other rows we need from this_pe */ for (j=i+1; j<reqlen; j++) { /* if row is on different pe */ if (reqind[j] < beg_rows[this_pe] || reqind[j] > end_rows[this_pe]) break; } /* Request rows in reqind[i..j-1] */ ierr = MPI_Isend(&reqind[i], j-i, MPI_INT, this_pe, 444, comm_dh, &request); CHECK_MPI_V_ERROR(ierr); ierr = MPI_Request_free(&request); CHECK_MPI_V_ERROR(ierr); /* Count of number of number of indices needed from this_pe */ outlist[this_pe] = j-i; ierr = MPI_Recv_init(&mat->recvbuf[i+m], j-i, MPI_DOUBLE, this_pe, 555, comm_dh, &mat->recv_req[mat->num_recv]); CHECK_MPI_V_ERROR(ierr); mat->num_recv++; mat->recvlen += j-i; /* only used for statistical reporting */ } END_FUNC_DH } /* adopted from Edmond Chow's ParaSails */ #undef __FUNC__ #define __FUNC__ 
"setup_matvec_sends_private" void setup_matvec_sends_private(Mat_dh mat, int *inlist) { START_FUNC_DH int ierr, i, j, sendlen, first = mat->beg_row; MPI_Request *requests; MPI_Status *statuses; requests = (MPI_Request *) MALLOC_DH(np_dh * sizeof(MPI_Request)); CHECK_V_ERROR; statuses = (MPI_Status *) MALLOC_DH(np_dh * sizeof(MPI_Status)); CHECK_V_ERROR; /* Determine size of and allocate sendbuf and sendind */ sendlen = 0; for (i=0; i<np_dh; i++) sendlen += inlist[i]; mat->sendlen = sendlen; mat->sendbuf = (double *)MALLOC_DH(sendlen * sizeof(double)); CHECK_V_ERROR; mat->sendind = (int *)MALLOC_DH(sendlen * sizeof(int)); CHECK_V_ERROR; j = 0; mat->num_send = 0; for (i=0; i<np_dh; i++) { if (inlist[i] != 0) { /* Post receive for the actual indices */ ierr = MPI_Irecv(&mat->sendind[j], inlist[i], MPI_INT, i, 444, comm_dh, &requests[mat->num_send]); CHECK_MPI_V_ERROR(ierr); /* Set up the send */ ierr = MPI_Send_init(&mat->sendbuf[j], inlist[i], MPI_DOUBLE, i, 555, comm_dh, &mat->send_req[mat->num_send]); CHECK_MPI_V_ERROR(ierr); mat->num_send++; j += inlist[i]; } } /* total bytes to be sent during matvec */ mat->time[MATVEC_WORDS] = j; ierr = MPI_Waitall(mat->num_send, requests, statuses); CHECK_MPI_V_ERROR(ierr); /* convert global indices to local indices */ /* these are all indices on this processor */ for (i=0; i<mat->sendlen; i++) mat->sendind[i] -= first; FREE_DH(requests); FREE_DH(statuses); END_FUNC_DH } /* unthreaded MPI version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec" void Mat_dhMatVec(Mat_dh mat, double *x, double *b) { START_FUNC_DH if (np_dh == 1) { Mat_dhMatVec_uni(mat, x, b); CHECK_V_ERROR; } else { int ierr, i, row, m = mat->m; int *rp = mat->rp, *cval = mat->cval; double *aval = mat->aval; int *sendind = mat->sendind; int sendlen = mat->sendlen; double *sendbuf = mat->sendbuf; double *recvbuf = mat->recvbuf; double t1 = 0, t2 = 0, t3 = 0, t4 = 0; bool timeFlag = mat->matvec_timing; if (timeFlag) t1 = MPI_Wtime(); /* Put components of x into 
the right outgoing buffers */ if (! commsOnly) { for (i=0; i<sendlen; i++) sendbuf[i] = x[sendind[i]]; } if (timeFlag) { t2 = MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); } ierr = MPI_Startall(mat->num_recv, mat->recv_req); CHECK_MPI_V_ERROR(ierr); ierr = MPI_Startall(mat->num_send, mat->send_req); CHECK_MPI_V_ERROR(ierr); ierr = MPI_Waitall(mat->num_recv, mat->recv_req, mat->status); CHECK_MPI_V_ERROR(ierr); ierr = MPI_Waitall(mat->num_send, mat->send_req, mat->status); CHECK_MPI_V_ERROR(ierr); if (timeFlag) { t3 = MPI_Wtime(); mat->time[MATVEC_MPI_TIME] += (t3 - t2); } /* Copy local part of x into top part of recvbuf */ if (! commsOnly) { for (i=0; i<m; i++) recvbuf[i] = x[i]; /* do the multiply */ for (row=0; row<m; row++) { int len = rp[row+1] - rp[row]; int * ind = cval+rp[row]; double * val = aval+rp[row]; double temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * recvbuf[ind[i]]); } b[row] = temp; } } /* if (! commsOnly) */ if (timeFlag) { t4 = MPI_Wtime(); mat->time[MATVEC_TOTAL_TIME] += (t4 - t1); mat->time[MATVEC_TIME] += (t4 - t3); } } END_FUNC_DH } /* OpenMP/MPI version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec_omp" void Mat_dhMatVec_omp(Mat_dh mat, double *x, double *b) { START_FUNC_DH int ierr, i, row, m = mat->m; int *rp = mat->rp, *cval = mat->cval; double *aval = mat->aval; int *sendind = mat->sendind; int sendlen = mat->sendlen; double *sendbuf = mat->sendbuf; double *recvbuf = mat->recvbuf; double t1 = 0, t2 = 0, t3 = 0, t4 = 0, tx = 0; double *val, temp; int len, *ind; bool timeFlag = mat->matvec_timing; if (timeFlag) t1 = MPI_Wtime(); /* Put components of x into the right outgoing buffers */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(i) #endif for (i=0; i<sendlen; i++) sendbuf[i] = x[sendind[i]]; if (timeFlag) { t2 = MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); } ierr = MPI_Startall(mat->num_recv, mat->recv_req); CHECK_MPI_V_ERROR(ierr); ierr = MPI_Startall(mat->num_send, mat->send_req); 
CHECK_MPI_V_ERROR(ierr); ierr = MPI_Waitall(mat->num_recv, mat->recv_req, mat->status); CHECK_MPI_V_ERROR(ierr); ierr = MPI_Waitall(mat->num_send, mat->send_req, mat->status); CHECK_MPI_V_ERROR(ierr); if (timeFlag) { t3 = MPI_Wtime(); mat->time[MATVEC_MPI_TIME] += (t3 - t2); } /* Copy local part of x into top part of recvbuf */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(i) #endif for (i=0; i<m; i++) recvbuf[i] = x[i]; if (timeFlag) { tx = MPI_Wtime(); mat->time[MATVEC_MPI_TIME2] += (tx - t1); } /* do the multiply */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(row,i,len,ind,val,temp) #endif for (row=0; row<m; row++) { len = rp[row+1] - rp[row]; ind = cval+rp[row]; val = aval+rp[row]; temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * recvbuf[ind[i]]); } b[row] = temp; } if (timeFlag) { t4 = MPI_Wtime(); mat->time[MATVEC_TOTAL_TIME] += (t4 - t1); mat->time[MATVEC_TIME] += (t4 - t3); } END_FUNC_DH } /* OpenMP/single primary task version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec_uni_omp" void Mat_dhMatVec_uni_omp(Mat_dh mat, double *x, double *b) { START_FUNC_DH int i, row, m = mat->m; int *rp = mat->rp, *cval = mat->cval; double *aval = mat->aval; double t1 = 0, t2 = 0; bool timeFlag = mat->matvec_timing; if (timeFlag) { t1 = MPI_Wtime(); } /* do the multiply */ #ifdef USING_OPENMP_DH #pragma omp parallel for schedule(runtime) private(row,i) #endif for (row=0; row<m; row++) { int len = rp[row+1] - rp[row]; int * ind = cval+rp[row]; double * val = aval+rp[row]; double temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * x[ind[i]]); } b[row] = temp; } if (timeFlag) { t2 = MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); mat->time[MATVEC_TOTAL_TIME] += (t2 - t1); } END_FUNC_DH } /* unthreaded, single-task version */ #undef __FUNC__ #define __FUNC__ "Mat_dhMatVec_uni" void Mat_dhMatVec_uni(Mat_dh mat, double *x, double *b) { START_FUNC_DH int i, row, m = mat->m; int *rp = mat->rp, *cval = mat->cval; 
double *aval = mat->aval; double t1 = 0, t2 = 0; bool timeFlag = mat->matvec_timing; if (timeFlag) t1 = MPI_Wtime(); for (row=0; row<m; row++) { int len = rp[row+1] - rp[row]; int * ind = cval+rp[row]; double * val = aval+rp[row]; double temp = 0.0; for (i=0; i<len; i++) { temp += (val[i] * x[ind[i]]); } b[row] = temp; } if (timeFlag) { t2 = MPI_Wtime(); mat->time[MATVEC_TIME] += (t2 - t1); mat->time[MATVEC_TOTAL_TIME] += (t2 - t1); } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhReadNz" int Mat_dhReadNz(Mat_dh mat) { START_FUNC_DH int ierr, retval = mat->rp[mat->m]; int nz = retval; ierr = MPI_Allreduce(&nz, &retval, 1, MPI_INT, MPI_SUM, comm_dh); CHECK_MPI_ERROR(ierr); END_FUNC_VAL(retval) } #if 0 #undef __FUNC__ #define __FUNC__ "Mat_dhAllocate_getRow_private" void Mat_dhAllocate_getRow_private(Mat_dh A) { START_FUNC_DH int i, *rp = A->rp, len = 0; int m = A->m; /* find longest row in matrix */ for (i=0; i<m; ++i) len = MAX(len, rp[i+1]-rp[i]); len *= A->bs; /* free any previously allocated private storage */ if (len > A->len_private) { if (A->cval_private != NULL) { FREE_DH(A->cval_private); CHECK_V_ERROR; } if (A->aval_private != NULL) { FREE_DH(A->aval_private); CHECK_V_ERROR; } } /* allocate private storage */ A->cval_private = (int*)MALLOC_DH(len*sizeof(int)); CHECK_V_ERROR; A->aval_private = (double*)MALLOC_DH(len*sizeof(double)); CHECK_V_ERROR; A->len_private = len; END_FUNC_DH } #endif #undef __FUNC__ #define __FUNC__ "Mat_dhZeroTiming" void Mat_dhZeroTiming(Mat_dh mat) { START_FUNC_DH int i; for (i=0; i<MAT_DH_BINS; ++i) { mat->time[i] = 0; mat->time_max[i] = 0; mat->time_min[i] = 0; } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhReduceTiming" void Mat_dhReduceTiming(Mat_dh mat) { START_FUNC_DH if (mat->time[MATVEC_MPI_TIME]) { mat->time[MATVEC_RATIO] = mat->time[MATVEC_TIME] / mat->time[MATVEC_MPI_TIME]; } MPI_Allreduce(mat->time, mat->time_min, MAT_DH_BINS, MPI_DOUBLE, MPI_MIN, comm_dh); MPI_Allreduce(mat->time, mat->time_max, 
MAT_DH_BINS, MPI_DOUBLE, MPI_MAX, comm_dh); END_FUNC_DH }

/* Builds B = permuted copy of A using the new-to-old row map n2o;
 * the same permutation is applied symmetrically to the columns
 * (via the inverse map o2n), so B = P*A*P'.  B is newly allocated
 * and ownership passes to the caller via *Bout.
 * NOTE(review): n2o is assumed to be a valid permutation of 0..m-1
 * (every entry used exactly once) -- not checked here.
 */
#undef __FUNC__
#define __FUNC__ "Mat_dhPermute"
void Mat_dhPermute(Mat_dh A, int *n2o, Mat_dh *Bout)
{
  START_FUNC_DH
  Mat_dh B;
  int i, j, *RP = A->rp, *CVAL = A->cval;
  int *o2n, *rp, *cval, m = A->m, nz = RP[m];
  double *aval, *AVAL = A->aval;

  Mat_dhCreate(&B); CHECK_V_ERROR;
  B->m = B->n = m;
  *Bout = B;

  /* form inverse permutation */
  o2n = (int*)MALLOC_DH(m*sizeof(int)); CHECK_V_ERROR;
  for (i=0; i<m; ++i) o2n[n2o[i]] = i;

  /* allocate storage for permuted matrix */
  rp = B->rp = (int*)MALLOC_DH((m+1)*sizeof(int)); CHECK_V_ERROR;
  cval = B->cval = (int*)MALLOC_DH(nz*sizeof(int)); CHECK_V_ERROR;
  aval = B->aval = (double*)MALLOC_DH(nz*sizeof(double)); CHECK_V_ERROR;

  /* form new rp array: first store per-row lengths ... */
  rp[0] = 0;
  for (i=0; i<m; ++i) {
    int oldRow = n2o[i];
    rp[i+1] = RP[oldRow+1]-RP[oldRow];
  }
  /* ... then prefix-sum them into CSR row pointers */
  for (i=1; i<=m; ++i) rp[i] = rp[i] + rp[i-1];

  /* copy each old row into its new position, remapping column indices */
  for (i=0; i<m; ++i) {
    int oldRow = n2o[i];
    int idx = rp[i];
    for (j=RP[oldRow]; j<RP[oldRow+1]; ++j) {
      cval[idx] = o2n[CVAL[j]];
      aval[idx] = AVAL[j];
      ++idx;
    }
  }
  FREE_DH(o2n); CHECK_V_ERROR;
  END_FUNC_DH
}

/*----------------------------------------------------------------------
 * Print methods
 *----------------------------------------------------------------------*/

/* seq or mpi */
/* Prints the adjacency graph of A to fp, one MPI task at a time.
 * Tasks take turns in subdomain order (sg->o2n_sub when sg != NULL),
 * separated by MPI_Barrier, so output from different ranks does not
 * interleave.  When sg is supplied the rows/columns are printed in
 * the permuted ordering defined by the subdomain graph.
 */
#undef __FUNC__
#define __FUNC__ "Mat_dhPrintGraph"
void Mat_dhPrintGraph(Mat_dh A, SubdomainGraph_dh sg, FILE *fp)
{
  START_FUNC_DH
  int pe, id = myid_dh;
  int ierr;

  if (sg != NULL) {
    id = sg->o2n_sub[id];
  }

  /* round-robin over ranks: only the rank whose turn it is prints */
  for (pe=0; pe<np_dh; ++pe) {
    ierr = MPI_Barrier(comm_dh); CHECK_MPI_V_ERROR(ierr);
    if (id == pe) {
      if (sg == NULL) {
        mat_dh_print_graph_private(A->m, A->beg_row, A->rp, A->cval,
                          A->aval, NULL, NULL, NULL, fp); CHECK_V_ERROR;
      } else {
        int beg_row = sg->beg_rowP[myid_dh];
        mat_dh_print_graph_private(A->m, beg_row, A->rp, A->cval,
                          A->aval, sg->n2o_row, sg->o2n_col, sg->o2n_ext,
                          fp); CHECK_V_ERROR;
      }
    }
  }
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "Mat_dhPrintRows"
void Mat_dhPrintRows(Mat_dh A, SubdomainGraph_dh sg,
FILE *fp) { START_FUNC_DH bool noValues; int m = A->m, *rp = A->rp, *cval = A->cval; double *aval = A->aval; noValues = (Parser_dhHasSwitch(parser_dh, "-noValues")); if (noValues) aval = NULL; /*---------------------------------------------------------------- * case 1: print local portion of unpermuted matrix *----------------------------------------------------------------*/ if (sg == NULL) { int i, j; int beg_row = A->beg_row; fprintf(fp, "\n----- A, unpermuted ------------------------------------\n"); for (i=0; i<m; ++i) { fprintf(fp, "%i :: ", 1+i+beg_row); for (j=rp[i]; j<rp[i+1]; ++j) { if (noValues) { fprintf(fp, "%i ", 1+cval[j]); } else { fprintf(fp, "%i,%g ; ", 1+cval[j], aval[j]); } } fprintf(fp, "\n"); } } /*---------------------------------------------------------------- * case 2: single mpi task, with multiple subdomains *----------------------------------------------------------------*/ else if (np_dh == 1) { int i, k, idx = 1; int oldRow; for (i=0; i<sg->blocks; ++i) { int oldBlock = sg->n2o_sub[i]; /* here, 'beg_row' and 'end_row' refer to rows in the original ordering of A. 
*/ int beg_row = sg->beg_row[oldBlock]; int end_row = beg_row + sg->row_count[oldBlock]; fprintf(fp, "\n"); fprintf(fp, "\n----- A, permuted, single mpi task ------------------\n"); fprintf(fp, "---- new subdomain: %i; old subdomain: %i\n", i, oldBlock); fprintf(fp, " old beg_row: %i; new beg_row: %i\n", sg->beg_row[oldBlock], sg->beg_rowP[oldBlock]); fprintf(fp, " local rows in this block: %i\n", sg->row_count[oldBlock]); fprintf(fp, " bdry rows in this block: %i\n", sg->bdry_count[oldBlock]); fprintf(fp, " 1st bdry row= %i \n", 1+end_row-sg->bdry_count[oldBlock]); for (oldRow=beg_row; oldRow<end_row; ++oldRow) { int len = 0, *cval; double *aval; fprintf(fp, "%3i (old= %3i) :: ", idx, 1+oldRow); ++idx; Mat_dhGetRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; for (k=0; k<len; ++k) { if (noValues) { fprintf(fp, "%i ", 1+sg->o2n_col[cval[k]]); } else { fprintf(fp, "%i,%g ; ", 1+sg->o2n_col[cval[k]], aval[k]); } } fprintf(fp, "\n"); Mat_dhRestoreRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; } } } /*---------------------------------------------------------------- * case 3: multiple mpi tasks, one subdomain per task *----------------------------------------------------------------*/ else { Hash_i_dh hash = sg->o2n_ext; int *o2n_col = sg->o2n_col, *n2o_row = sg->n2o_row; int beg_row = sg->beg_row[myid_dh]; int beg_rowP = sg->beg_rowP[myid_dh]; int i, j; for (i=0; i<m; ++i) { int row = n2o_row[i]; fprintf(fp, "%3i (old= %3i) :: ", 1+i+beg_rowP, 1+row+beg_row); for (j=rp[row]; j<rp[row+1]; ++j) { int col = cval[j]; /* find permuted (old-to-new) value for the column */ /* case i: column is locally owned */ if (col >= beg_row && col < beg_row+m) { col = o2n_col[col-beg_row] + beg_rowP; } /* case ii: column is external */ else { int tmp = col; tmp = Hash_i_dhLookup(hash, col); CHECK_V_ERROR; if (tmp == -1) { sprintf(msgBuf_dh, "nonlocal column= %i not in hash table", 1+col); SET_V_ERROR(msgBuf_dh); } else { col = tmp; } } if (noValues) { fprintf(fp, "%i ", 1+col); } 
else { fprintf(fp, "%i,%g ; ", 1+col, aval[j]); } } fprintf(fp, "\n"); } } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPrintTriples" void Mat_dhPrintTriples(Mat_dh A, SubdomainGraph_dh sg, char *filename) { START_FUNC_DH int m = A->m, *rp = A->rp, *cval = A->cval; double *aval = A->aval; bool noValues; bool matlab; FILE *fp; noValues = (Parser_dhHasSwitch(parser_dh, "-noValues")); if (noValues) aval = NULL; matlab = (Parser_dhHasSwitch(parser_dh, "-matlab")); /*---------------------------------------------------------------- * case 1: unpermuted matrix, single or multiple mpi tasks *----------------------------------------------------------------*/ if (sg == NULL) { int i, j, pe; int beg_row = A->beg_row; double val; for (pe=0; pe<np_dh; ++pe) { MPI_Barrier(comm_dh); if (pe == myid_dh) { if (pe == 0) { fp=openFile_dh(filename, "w"); CHECK_V_ERROR; } else { fp=openFile_dh(filename, "a"); CHECK_V_ERROR; } for (i=0; i<m; ++i) { for (j=rp[i]; j<rp[i+1]; ++j) { if (noValues) { fprintf(fp, "%i %i\n", 1+i+beg_row, 1+cval[j]); } else { val = aval[j]; if (val == 0.0 && matlab) val = _MATLAB_ZERO_; fprintf(fp, TRIPLES_FORMAT, 1+i+beg_row, 1+cval[j], val); } } } closeFile_dh(fp); CHECK_V_ERROR; } } } /*---------------------------------------------------------------- * case 2: single mpi task, with multiple subdomains *----------------------------------------------------------------*/ else if (np_dh == 1) { int i, j, k, idx = 1; fp=openFile_dh(filename, "w"); CHECK_V_ERROR; for (i=0; i<sg->blocks; ++i) { int oldBlock = sg->n2o_sub[i]; int beg_row = sg->beg_rowP[oldBlock]; int end_row = beg_row + sg->row_count[oldBlock]; for (j=beg_row; j<end_row; ++j) { int len = 0, *cval; double *aval; int oldRow = sg->n2o_row[j]; Mat_dhGetRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; if (noValues) { for (k=0; k<len; ++k) { fprintf(fp, "%i %i\n", idx, 1+sg->o2n_col[cval[k]]); } ++idx; } else { for (k=0; k<len; ++k) { double val = aval[k]; if (val == 0.0 && matlab) val = 
_MATLAB_ZERO_; fprintf(fp, TRIPLES_FORMAT, idx, 1+sg->o2n_col[cval[k]], val); } ++idx; } Mat_dhRestoreRow(A, oldRow, &len, &cval, &aval); CHECK_V_ERROR; } } } /*---------------------------------------------------------------- * case 3: multiple mpi tasks, one subdomain per task *----------------------------------------------------------------*/ else { Hash_i_dh hash = sg->o2n_ext; int *o2n_col = sg->o2n_col, *n2o_row = sg->n2o_row; int beg_row = sg->beg_row[myid_dh]; int beg_rowP = sg->beg_rowP[myid_dh]; int i, j, pe; int id = sg->o2n_sub[myid_dh]; for (pe=0; pe<np_dh; ++pe) { MPI_Barrier(comm_dh); if (id == pe) { if (pe == 0) { fp=openFile_dh(filename, "w"); CHECK_V_ERROR; } else { fp=openFile_dh(filename, "a"); CHECK_V_ERROR; } for (i=0; i<m; ++i) { int row = n2o_row[i]; for (j=rp[row]; j<rp[row+1]; ++j) { int col = cval[j]; double val = 0.0; if (aval != NULL) val = aval[j]; if (val == 0.0 && matlab) val = _MATLAB_ZERO_; /* find permuted (old-to-new) value for the column */ /* case i: column is locally owned */ if (col >= beg_row && col < beg_row+m) { col = o2n_col[col-beg_row] + beg_rowP; } /* case ii: column is external */ else { int tmp = col; tmp = Hash_i_dhLookup(hash, col); CHECK_V_ERROR; if (tmp == -1) { sprintf(msgBuf_dh, "nonlocal column= %i not in hash table", 1+col); SET_V_ERROR(msgBuf_dh); } else { col = tmp; } } if (noValues) { fprintf(fp, "%i %i\n", 1+i+beg_rowP, 1+col); } else { fprintf(fp, TRIPLES_FORMAT, 1+i+beg_rowP, 1+col, val); } } } closeFile_dh(fp); CHECK_V_ERROR; } } } END_FUNC_DH } /* seq only */ #undef __FUNC__ #define __FUNC__ "Mat_dhPrintCSR" void Mat_dhPrintCSR(Mat_dh A, SubdomainGraph_dh sg, char *filename) { START_FUNC_DH FILE *fp; if (np_dh > 1) { SET_V_ERROR("only implemented for a single mpi task"); } if (sg != NULL) { SET_V_ERROR("not implemented for reordered matrix (SubdomainGraph_dh should be NULL)"); } fp=openFile_dh(filename, "w"); CHECK_V_ERROR; if (sg == NULL) { mat_dh_print_csr_private(A->m, A->rp, A->cval, A->aval, fp); 
CHECK_V_ERROR; } else { mat_dh_print_csr_private(A->m, A->rp, A->cval, A->aval, fp); CHECK_V_ERROR; } closeFile_dh(fp); CHECK_V_ERROR; END_FUNC_DH } /* seq */ /* no reordering */ #undef __FUNC__ #define __FUNC__ "Mat_dhPrintBIN" void Mat_dhPrintBIN(Mat_dh A, SubdomainGraph_dh sg, char *filename) { START_FUNC_DH if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } /* if (n2o != NULL || o2n != NULL || hash != NULL) { */ if (sg != NULL) { SET_V_ERROR("not implemented for reordering; ensure sg=NULL"); } io_dh_print_ebin_mat_private(A->m, A->beg_row, A->rp, A->cval, A->aval, NULL, NULL, NULL, filename); CHECK_V_ERROR; END_FUNC_DH } /*---------------------------------------------------------------------- * Read methods *----------------------------------------------------------------------*/ /* seq only */ #undef __FUNC__ #define __FUNC__ "Mat_dhReadCSR" void Mat_dhReadCSR(Mat_dh *mat, char *filename) { START_FUNC_DH Mat_dh A; FILE *fp; if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } fp=openFile_dh(filename, "r"); CHECK_V_ERROR; Mat_dhCreate(&A); CHECK_V_ERROR; mat_dh_read_csr_private(&A->m, &A->rp, &A->cval, &A->aval, fp); CHECK_V_ERROR; A->n = A->m; *mat = A; closeFile_dh(fp); CHECK_V_ERROR; END_FUNC_DH } /* seq only */ #undef __FUNC__ #define __FUNC__ "Mat_dhReadTriples" void Mat_dhReadTriples(Mat_dh *mat, int ignore, char *filename) { START_FUNC_DH FILE *fp = NULL; Mat_dh A = NULL; if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } fp=openFile_dh(filename, "r"); CHECK_V_ERROR; Mat_dhCreate(&A); CHECK_V_ERROR; mat_dh_read_triples_private(ignore, &A->m, &A->rp, &A->cval, &A->aval, fp); CHECK_V_ERROR; A->n = A->m; *mat = A; closeFile_dh(fp); CHECK_V_ERROR; END_FUNC_DH } /* here we pass the private function a filename, instead of an open file, the reason being that Euclid's binary format is more complicated, i.e, the other "Read" methods are only for a single mpi task. 
*/ #undef __FUNC__ #define __FUNC__ "Mat_dhReadBIN" void Mat_dhReadBIN(Mat_dh *mat, char *filename) { START_FUNC_DH Mat_dh A; if (np_dh > 1) { SET_V_ERROR("only implemented for a single MPI task"); } Mat_dhCreate(&A); CHECK_V_ERROR; io_dh_read_ebin_mat_private(&A->m, &A->rp, &A->cval, &A->aval, filename); CHECK_V_ERROR; A->n = A->m; *mat = A; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhTranspose" void Mat_dhTranspose(Mat_dh A, Mat_dh *Bout) { START_FUNC_DH Mat_dh B; if (np_dh > 1) { SET_V_ERROR("only for sequential"); } Mat_dhCreate(&B); CHECK_V_ERROR; *Bout = B; B->m = B->n = A->m; mat_dh_transpose_private(A->m, A->rp, &B->rp, A->cval, &B->cval, A->aval, &B->aval); CHECK_V_ERROR; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhMakeStructurallySymmetric" void Mat_dhMakeStructurallySymmetric(Mat_dh A) { START_FUNC_DH if (np_dh > 1) { SET_V_ERROR("only for sequential"); } make_symmetric_private(A->m, &A->rp, &A->cval, &A->aval); CHECK_V_ERROR; END_FUNC_DH } void insert_diags_private(Mat_dh A, int ct); /* inserts diagonal if not explicitly present; sets diagonal value in row i to sum of absolute values of all elts in row i. 
*/ #undef __FUNC__ #define __FUNC__ "Mat_dhFixDiags" void Mat_dhFixDiags(Mat_dh A) { START_FUNC_DH int i, j; int *rp = A->rp, *cval = A->cval, m = A->m; bool ct = 0; /* number of missing diagonals */ double *aval = A->aval; /* determine if any diagonals are missing */ for (i=0; i<m; ++i) { bool flag = true; for (j=rp[i]; j<rp[i+1]; ++j) { int col = cval[j]; if (col == i) { flag = false; break; } } if (flag) ++ct; } /* insert any missing diagonal elements */ if (ct) { printf("\nMat_dhFixDiags:: %i diags not explicitly present; inserting!\n", ct); insert_diags_private(A, ct); CHECK_V_ERROR; rp = A->rp; cval = A->cval; aval = A->aval; } /* set the value of all diagonal elements */ for (i=0; i<m; ++i) { double sum = 0.0; for (j=rp[i]; j<rp[i+1]; ++j) { sum += fabs(aval[j]); } for (j=rp[i]; j<rp[i+1]; ++j) { if (cval[j] == i) { aval[j] = sum; } } } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "insert_diags_private" void insert_diags_private(Mat_dh A, int ct) { START_FUNC_DH int *RP = A->rp, *CVAL = A->cval; int *rp, *cval, m = A->m; double *aval, *AVAL = A->aval; int nz = RP[m] + ct; int i, j, idx = 0; rp = A->rp = (int*)MALLOC_DH((m+1)*sizeof(int)); CHECK_V_ERROR; cval = A->cval = (int*)MALLOC_DH(nz*sizeof(int)); CHECK_V_ERROR; aval = A->aval = (double*)MALLOC_DH(nz*sizeof(double)); CHECK_V_ERROR; rp[0] = 0; for (i=0; i<m; ++i) { bool flag = true; for (j=RP[i]; j<RP[i+1]; ++j) { cval[idx] = CVAL[j]; aval[idx] = AVAL[j]; ++idx; if (CVAL[j] == i) flag = false; } if (flag) { cval[idx] = i; aval[idx] = 0.0; ++idx; } rp[i+1] = idx; } FREE_DH(RP); CHECK_V_ERROR; FREE_DH(CVAL); CHECK_V_ERROR; FREE_DH(AVAL); CHECK_V_ERROR; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPrintDiags" void Mat_dhPrintDiags(Mat_dh A, FILE *fp) { START_FUNC_DH int i, j, m = A->m; int *rp = A->rp, *cval = A->cval; double *aval = A->aval; fprintf(fp, "=================== diagonal elements ====================\n"); for (i=0; i<m; ++i) { bool flag = true; for (j=rp[i]; j<rp[i+1]; ++j) { if 
(cval[j] == i) { fprintf(fp, "%i %g\n", i+1, aval[j]); flag = false; break; } } if (flag) { fprintf(fp, "%i ---------- missing\n", i+1); } } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhGetRow" void Mat_dhGetRow(Mat_dh B, int globalRow, int *len, int **ind, double **val) { START_FUNC_DH int row = globalRow - B->beg_row; if (row > B->m) { sprintf(msgBuf_dh, "requested globalRow= %i, which is local row= %i, but only have %i rows!", globalRow, row, B->m); SET_V_ERROR(msgBuf_dh); } *len = B->rp[row+1] - B->rp[row]; if (ind != NULL) *ind = B->cval + B->rp[row]; if (val != NULL) *val = B->aval + B->rp[row]; END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhRestoreRow" void Mat_dhRestoreRow(Mat_dh B, int row, int *len, int **ind, double **val) { START_FUNC_DH END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhRowPermute" void Mat_dhRowPermute(Mat_dh mat) { START_FUNC_DH if (ignoreMe) SET_V_ERROR("turned off; compilation problem on blue"); #if 0 int i, j, m = mat->m, nz = mat->rp[m]; int *o2n, *cval; int algo = 1; double *r1, *c1; bool debug = mat->debug; bool isNatural; Mat_dh B; #if 0 * = 1 : Compute a row permutation of the matrix so that the * permuted matrix has as many entries on its diagonal as * possible. The values on the diagonal are of arbitrary size. * HSL subroutine MC21A/AD is used for this. * = 2 : Compute a row permutation of the matrix so that the smallest * value on the diagonal of the permuted matrix is maximized. * = 3 : Compute a row permutation of the matrix so that the smallest * value on the diagonal of the permuted matrix is maximized. * The algorithm differs from the one used for JOB = 2 and may * have quite a different performance. * = 4 : Compute a row permutation of the matrix so that the sum * of the diagonal entries of the permuted matrix is maximized. 
* = 5 : Compute a row permutation of the matrix so that the product * of the diagonal entries of the permuted matrix is maximized * and vectors to scale the matrix so that the nonzero diagonal * entries of the permuted matrix are one in absolute value and * all the off-diagonal entries are less than or equal to one in * absolute value. #endif Parser_dhReadInt(parser_dh, "-rowPermute", &algo); CHECK_V_ERROR; if (algo < 1) algo = 1; if (algo > 5) algo = 1; sprintf(msgBuf_dh, "calling row permutation with algo= %i", algo); SET_INFO(msgBuf_dh); r1 = (double*)MALLOC_DH(m*sizeof(double)); CHECK_V_ERROR; c1 = (double*)MALLOC_DH(m*sizeof(double)); CHECK_V_ERROR; if (mat->row_perm == NULL) { mat->row_perm = o2n = (int*)MALLOC_DH(m*sizeof(int)); CHECK_V_ERROR; } else { o2n = mat->row_perm; } Mat_dhTranspose(mat, &B); CHECK_V_ERROR; /* get row permutation and scaling vectors */ dldperm(algo, m, nz, B->rp, B->cval, B->aval, o2n, r1, c1); /* permute column indices, then turn the matrix rightside up */ cval = B->cval; for (i=0; i<nz; ++i) cval[i] = o2n[cval[i]]; /* debug block */ if (debug && logFile != NULL) { fprintf(logFile, "\n-------- row permutation vector --------\n"); for (i=0; i<m; ++i) fprintf(logFile, "%i ", 1+o2n[i]); fprintf(logFile, "\n"); if (myid_dh == 0) { printf("\n-------- row permutation vector --------\n"); for (i=0; i<m; ++i) printf("%i ", 1+o2n[i]); printf("\n"); } } /* check to see if permutation is non-natural */ isNatural = true; for (i=0; i<m; ++i) { if (o2n[i] != i) { isNatural = false; break; } } if (isNatural) { printf("@@@ [%i] Mat_dhRowPermute :: got natural ordering!\n", myid_dh); } else { int *rp = B->rp, *cval = B->cval; double *aval = B->aval; if (algo == 5) { printf("@@@ [%i] Mat_dhRowPermute :: scaling matrix rows and columns!\n", myid_dh); /* scale matrix */ for (i=0; i<m; i++) { r1[i] = exp(r1[i]); c1[i] = exp(c1[i]); } for (i=0; i<m; i++) for (j=rp[i]; j<rp[i+1]; j++) aval[j] *= r1[cval[j]] * c1[i]; } mat_dh_transpose_reuse_private(B->m, 
B->rp, B->cval, B->aval, mat->rp, mat->cval, mat->aval); CHECK_V_ERROR; } Mat_dhDestroy(B); CHECK_V_ERROR; FREE_DH(r1); CHECK_V_ERROR; FREE_DH(c1); CHECK_V_ERROR; #endif END_FUNC_DH } /*==============================================================================*/ #undef __FUNC__ #define __FUNC__ "Mat_dhPartition" void build_adj_lists_private(Mat_dh mat, int **rpOUT, int **cvalOUT) { START_FUNC_DH int m = mat->m; int *RP = mat->rp, *CVAL = mat->cval; int nz = RP[m]; int i, j, *rp, *cval, idx = 0; rp = *rpOUT = (int *)MALLOC_DH((m+1)*sizeof(int)); CHECK_V_ERROR; cval = *cvalOUT = (int *)MALLOC_DH(nz*sizeof(int)); CHECK_V_ERROR; rp[0] = 0; /* assume symmetry for now! */ for (i=0; i<m; ++i) { for (j=RP[i]; j<RP[i+1]; ++j) { int col = CVAL[j]; if (col != i) { cval[idx++] = col; } } rp[i+1] = idx; } END_FUNC_DH } #undef __FUNC__ #define __FUNC__ "Mat_dhPartition" void Mat_dhPartition(Mat_dh mat, int blocks, int **beg_rowOUT, int **row_countOUT, int **n2oOUT, int **o2nOUT) { START_FUNC_DH #ifndef HAVE_METIS_DH if (ignoreMe) SET_V_ERROR("not compiled for metis!"); #else int *beg_row, *row_count, *n2o, *o2n, bk, new, *part; int m = mat->m; int i, cutEdgeCount; double zero = 0.0; int metisOpts[5] = {0, 0, 0, 0, 0}; int *rp, *cval; /* allocate storage for returned arrays */ beg_row = *beg_rowOUT = (int *)MALLOC_DH(blocks*sizeof(int)); CHECK_V_ERROR; row_count = *row_countOUT = (int *)MALLOC_DH(blocks*sizeof(int)); CHECK_V_ERROR; *n2oOUT = n2o = (int *)MALLOC_DH(m*sizeof(int)); CHECK_V_ERROR; *o2nOUT = o2n = (int *)MALLOC_DH(m*sizeof(int)); CHECK_V_ERROR; #if 0 ============================================================= Metis arguments: n - number of nodes rp[], cval[] NULL, NULL, 0 /*no edge or vertex weights*/ 0 /*use zero-based numbering*/ blocksIN, options[5] = 0 :: 0/1 use defauls; use uptions 1..4 1 :: edgecutOUT, part[] ============================================================= #endif /* form the graph representation that metis wants */ 
build_adj_lists_private(mat, &rp, &cval); CHECK_V_ERROR; part = (int *)MALLOC_DH(m*sizeof(int)); CHECK_V_ERROR; /* get parition vector from metis */ METIS_PartGraphKway(&m, rp, cval, NULL, NULL, &zero, &zero, &blocks, metisOpts, &cutEdgeCount, part); FREE_DH(rp); CHECK_V_ERROR; FREE_DH(cval); CHECK_V_ERROR; if (mat->debug) { printf_dh("\nmetis partitioning vector; blocks= %i\n", blocks); for (i=0; i<m; ++i) printf_dh(" %i %i\n", i+1, part[i]); } /* compute beg_row, row_count arrays from partition vector */ for (i=0; i<blocks; ++i) row_count[i] = 0; for (i=0; i<m; ++i) { bk = part[i]; /* block to which row i belongs */ row_count[bk] += 1; } beg_row[0] = 0; for (i=1; i<blocks; ++i) beg_row[i] = beg_row[i-1] + row_count[i-1]; if (mat->debug) { printf_dh("\nrow_counts: "); for (i=0; i<blocks; ++i) printf_dh(" %i", row_count[i]); printf_dh("\nbeg_row: "); for (i=0; i<blocks; ++i) printf_dh(" %i", beg_row[i]+1); printf_dh("\n"); } /* compute permutation vector */ { int *tmp = (int*)MALLOC_DH(blocks*sizeof(int)); CHECK_V_ERROR; memcpy(tmp, beg_row, blocks*sizeof(int)); for (i=0; i<m; ++i) { bk = part[i]; /* block to which row i belongs */ new = tmp[bk]; tmp[bk] += 1; o2n[i] = new; n2o[new] = i; } FREE_DH(tmp); } FREE_DH(part); CHECK_V_ERROR; #endif END_FUNC_DH }
8.race4.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s // XFAIL: * #include <omp.h> #define N 20 int main() { double A[N], B[N], sum0 = 0.0, sum1 = 0.0; #pragma omp parallel { #pragma omp for nowait for (int i = 0; i < N; i++) { A[i] = i; B[i] = i * i; } #pragma omp for reduction(+ : sum0) for (int i = 0; i < N; i++) { sum0 += A[i] * B[i]; } } for (int i = 0; i < N; i++) { sum1 += i * i * i; } return (sum1 - sum0); } // CHECK: Data Race detected // CHECK: Data Race detected // END
GB_binop__rminus_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): all changes here will be lost on regeneration; edit the
// Generator/ sources instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__rminus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_int16)
// A*D function (colscale):         GB (_AxD__rminus_int16)
// D*A function (rowscale):         GB (_DxB__rminus_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_int16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_int16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_int16)
// C=scalar+B                       GB (_bind1st__rminus_int16)
// C=scalar+B'                      GB (_bind1st_tran__rminus_int16)
// C=A+scalar                       GB (_bind2nd__rminus_int16)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_int16)

// C type:   int16_t
// A type:   int16_t
// A pattern? 0
// B type:   int16_t
// B pattern? 0

// BinaryOp: cij = (bij - aij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: rminus is "reverse minus", z = y - x
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_INT16 || GxB_NO_RMINUS_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): auto-generated wrappers; each body is supplied by the
// #included template, specialized through the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // GB_DISABLE: operator compiled out via GxB_NO_* controls; generic
    // fallback is selected by the caller when GrB_NO_VALUE is returned.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generated code); kept as emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rminus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - x) ;                       \
}

GrB_Info GB (_bind1st_tran__rminus_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (y - aij) ;                       \
}

GrB_Info GB (_bind2nd_tran__rminus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Pragma.h
//===--- Pragma.h - Pragma registration and handling ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the PragmaHandler and PragmaTable interfaces. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PRAGMA_H #define LLVM_CLANG_PRAGMA_H #include "clang/Basic/LLVM.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include <cassert> namespace clang { class Preprocessor; class Token; class IdentifierInfo; class PragmaNamespace; /** * \brief Describes how the pragma was introduced, e.g., with \#pragma, * _Pragma, or __pragma. */ enum PragmaIntroducerKind { /** * \brief The pragma was introduced via \#pragma. */ PIK_HashPragma, /** * \brief The pragma was introduced via the C99 _Pragma(string-literal). */ PIK__Pragma, /** * \brief The pragma was introduced via the Microsoft * __pragma(token-string). */ PIK___pragma }; /// PragmaHandler - Instances of this interface defined to handle the various /// pragmas that the language front-end uses. Each handler optionally has a /// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with /// that identifier is found. If a handler does not match any of the declared /// pragmas the handler with a null identifier is invoked, if it exists. /// /// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g. /// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other /// pragmas. 
class PragmaHandler {
  std::string Name;
public:
  explicit PragmaHandler(StringRef name) : Name(name) {}
  // Default constructor creates the null-named handler, invoked when no
  // named handler in the namespace matches (see class comment above).
  PragmaHandler() {}
  virtual ~PragmaHandler();

  StringRef getName() const { return Name; }
  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken) = 0;

  /// getIfNamespace - If this is a namespace, return it.  This is equivalent to
  /// using a dynamic_cast, but doesn't require RTTI.
  virtual PragmaNamespace *getIfNamespace() { return 0; }
};

/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas.
class EmptyPragmaHandler : public PragmaHandler {
public:
  EmptyPragmaHandler();

  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken);
};

/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined.  Common examples of namespaces
/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
  /// Handlers - This is a map of the handlers in this namespace with their name
  /// as key.
  ///
  llvm::StringMap<PragmaHandler*> Handlers;
public:
  explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
  virtual ~PragmaNamespace();

  /// FindHandler - Check to see if there is already a handler for the
  /// specified name.  If not, return the handler for the null name if it
  /// exists, otherwise return null.  If IgnoreNull is true (the default) then
  /// the null handler isn't returned on failure to match.
  PragmaHandler *FindHandler(StringRef Name, bool IgnoreNull = true) const;

  /// AddPragma - Add a pragma to this namespace.
  ///
  void AddPragma(PragmaHandler *Handler);

  /// RemovePragmaHandler - Remove the given handler from the
  /// namespace.
  void RemovePragmaHandler(PragmaHandler *Handler);

  bool IsEmpty() {
    return Handlers.empty();
  }

  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken);

  // Namespaces identify themselves without RTTI; see PragmaHandler.
  virtual PragmaNamespace *getIfNamespace() { return this; }
};

}  // end namespace clang

#endif
ParallelCG.h
/*
	This file is part of NSEssentials.

	Use of this source code is granted via a BSD-style license, which can be found
	in License.txt in the repository root.

	@author Nico Schertler
	@author Misha Kazhdan
*/
#pragma once

#ifdef HAVE_EIGEN

#include <Eigen/Dense>

// A parallel conjugate gradient solver complying to the Eigen
// Sparse Solver concept.
namespace nse {
	namespace math {

		// Jacobi-preconditioned CG with OpenMP-parallel inner loops.
		// Usage: compute(A) once, then solveWithGuess(rhs, guess, x).
		template <typename Matrix>
		class ParallelCG
		{
		public:
			// -1 values act as "unset"; compute() replaces them with defaults.
			ParallelCG() : maxIterations(-1), m(nullptr), toleranceSq(-1) { }

			//Specifies the column range of the initial guess and the solution that you want to solve
			int solveColLowerInclusive = 0;
			int solveColUpperExclusive = -1;

			void setMaxIterations(int i) { maxIterations = i; }
			// Stored squared, so the convergence test compares squared norms.
			void setTolerance(double t) { toleranceSq = t * t; }

			// Average iteration count per solved column of the last solve.
			int iterations() const { return _iterations; }

			// Stores a pointer to m (caller must keep it alive) and builds the
			// Jacobi (inverse-diagonal) preconditioner.
			void compute(const Matrix& m)
			{
				if (maxIterations == -1)
					maxIterations = m.rows();
				if (toleranceSq == -1)
					toleranceSq = 1e-16;
				this->m = &m;

				//Calculate preconditioner
				// NOTE(review): invDiag is Eigen::VectorXf (float) even when the
				// solve runs in double precision -- confirm this loss of
				// preconditioner precision is intended.
				invDiag.resize(m.rows());
				for (int j = 0; j < m.outerSize(); ++j)
				{
					// scan column/row j for its diagonal entry
					typename Matrix::InnerIterator it(m, j);
					while (it && it.index() != j)
						++it;
					if (it && it.index() == j && it.value() != 0)
						invDiag(j) = 1.0f / it.value();
					else
						invDiag(j) = 1; // missing/zero diagonal: identity fallback
				}
			}

			// Solves m * solution.col(c) = rhs.col(c - solveColLowerInclusive)
			// for each column c in [solveColLowerInclusive, upperCol), starting
			// from guess.  Classic preconditioned CG; the residual is
			// recomputed from scratch every RESET_COUNT iterations to limit
			// drift from rounding.
			template <typename RHSType, typename SolutionType, typename Scalar = typename RHSType::Scalar>
			void solveWithGuess(const RHSType& rhs, const SolutionType& guess, SolutionType& solution)
			{
				// r: residual, d: search direction, q: m*d, s: preconditioned residual
				Eigen::Matrix<Scalar, -1, 1> r(rhs.rows()), d(rhs.rows()), q(rhs.rows()), s(rhs.rows());

				int upperCol = solveColUpperExclusive;
				if (upperCol < 0)
					upperCol = guess.cols();
				assert(upperCol - solveColLowerInclusive == rhs.cols());
				_iterations = 0;
				for (int col = solveColLowerInclusive; col < upperCol; ++col)
				{
#pragma omp parallel for
					for (int i = 0; i < solution.rows(); ++i)
						solution.coeffRef(i, col) = guess.coeff(i, col);

					// r = m * x0 (overwritten with the residual just below)
					parallelMatrixMultiplyVector(*m, solution, col, r);

					Scalar rhsNormSq = 0;
#pragma omp parallel for reduction( + : rhsNormSq)
					for (int i = 0; i < rhs.rows(); i++)
					{
						r(i) = rhs.coeff(i, col - solveColLowerInclusive) - r(i);
						d(i) = invDiag(i) * r(i);
						rhsNormSq += rhs.coeff(i, col - solveColLowerInclusive) * rhs.coeff(i, col - solveColLowerInclusive);
					}
					Scalar threshold = toleranceSq * rhsNormSq;

					Scalar delta_new = 0;
#pragma omp parallel for reduction( + : delta_new )
					for (int i = 0; i < rhs.rows(); i++)
						delta_new += r(i) * d(i);

					if (delta_new < threshold)
					{
						// initial guess already good enough for this column
						continue;
					}

					int it;
					for (it = 0; it < maxIterations && delta_new > threshold; it++)
					{
						// q = m * d (d is a plain vector, hence column 0)
						parallelMatrixMultiplyVector(*m, d, 0, q);

						Scalar dDotQ = 0;
#pragma omp parallel for reduction( + : dDotQ )
						for (int i = 0; i < rhs.rows(); i++)
							dDotQ += d(i) * q(i);

						Scalar alpha = delta_new / dDotQ;

#pragma omp parallel for
						for (int i = 0; i < rhs.rows(); i++)
							solution.coeffRef(i, col) = (typename SolutionType::Scalar)(solution.coeff(i, col) + d(i) * alpha);

						const int RESET_COUNT = 50;
						if ((it % RESET_COUNT) == (RESET_COUNT - 1))
						{
							// periodic exact residual: r = rhs - m*x
							parallelMatrixMultiplyVector(*m, solution, col, r);
#pragma omp parallel for
							for (int i = 0; i < rhs.rows(); i++)
							{
								r(i) = rhs.coeff(i, col - solveColLowerInclusive) - r(i);
								s(i) = invDiag(i) * r(i);
							}
						}
						else
						{
							// cheap residual update: r -= alpha * q
#pragma omp parallel for
							for (int i = 0; i < rhs.rows(); i++)
							{
								r(i) = (typename RHSType::Scalar)(r(i) - q(i) * alpha);
								s(i) = invDiag(i) * r(i);
							}
						}

						Scalar delta_old = delta_new;
						delta_new = 0;
#pragma omp parallel for reduction( + : delta_new )
						for (int i = 0; i < rhs.rows(); i++)
							delta_new += r(i) * s(i);

						Scalar beta = delta_new / delta_old;
#pragma omp parallel for
						for (int i = 0; i < rhs.rows(); i++)
							d(i) = (typename RHSType::Scalar)(s(i) + d(i) * beta);
					}
					_iterations += it;
				} //for every column
				// NOTE(review): integer division; assumes at least one column
				// is solved (upperCol > solveColLowerInclusive) -- verify callers.
				_iterations /= upperCol - solveColLowerInclusive;
			}

		private:
			// out = m * x.col(col), rows computed independently in parallel.
			template <typename RHSType, typename SolutionType>
			void parallelMatrixMultiplyVector(const Matrix& m, const RHSType& x, int col, SolutionType& out) const
			{
#pragma omp parallel for
				for (int row = 0; row < m.rows(); row++)
				{
					double accum = 0;
					for (typename Matrix::InnerIterator it(m, row); it; ++it)
						accum += it.value() * x.coeff(it.index(), col);
					out(row) = (typename SolutionType::Scalar)accum;
				}
			}

			Eigen::VectorXf invDiag;   // Jacobi preconditioner: 1/diag(m)
			int maxIterations;         // per-column iteration cap
			double toleranceSq;        // squared relative tolerance
			const Matrix* m;           // not owned; set by compute()
			int _iterations;           // average iterations of last solve
		};
	}
}
#endif
convolution_3x3_pack16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a 3x3 convolution kernel into the Winograd F(6,3) domain (8x8
// tiles) and interleave it into the 16-element-packed layout consumed by
// conv3x3s1_winograd64_pack16_avx512 below.
// NOTE(review): the out-parameter is named kernel_tm_pack8 but holds the
// pack-16 layout (naming inherited from the pack8 variant this was derived from).
static void conv3x3s1_winograd64_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the F(6x6,3x3) Winograd transform: kernel_tm = G * k * G^T
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 16b-16a-inch/16a-64-outch/16b
    // assumes inch and outch are multiples of 16 (pack16 layout) -- the
    // loops below silently drop any remainder channels
    kernel_tm_pack8.create(inch / 16, 64, outch / 16, (size_t)4u * 16 * 16, 16 * 16);

    int q = 0;
    for (; q + 15 < outch; q += 16)
    {
        Mat g0 = kernel_tm_pack8.channel(q / 16);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 15 < inch; p += 16)
            {
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < 16; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

static void conv3x3s1_winograd64_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 6;
        int h_tiles = outh / 6;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd64_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // re-pack tiles in groups of 12/8/4/2/1 for the GEMM-style kernels
        Mat bottom_blob_tm2;
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize,
elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x12 __m512 _r0 = _mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); __m512 _r2 = _mm512_load_ps(r0 + 16 * 2); __m512 _r3 = _mm512_load_ps(r0 + 16 * 3); __m512 _r4 = _mm512_load_ps(r0 + 16 * 4); __m512 _r5 = _mm512_load_ps(r0 + 16 * 5); __m512 _r6 = _mm512_load_ps(r0 + 16 * 6); __m512 _r7 = _mm512_load_ps(r0 + 16 * 7); __m512 _r8 = _mm512_load_ps(r0 + 16 * 8); __m512 _r9 = _mm512_load_ps(r0 + 16 * 9); __m512 _ra = _mm512_load_ps(r0 + 16 * 10); __m512 _rb = _mm512_load_ps(r0 + 16 * 11); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3); __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3); __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5); __m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5); __m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7); __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7); __m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9); __m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9); __m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb); __m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb); __m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); 
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2)); _tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0)); _tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0)); _tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0)); _tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0)); _tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0)); _tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0)); _tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1)); _tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1)); _tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1)); _tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1)); _tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1)); _tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0)); _r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0)); _r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, 
_MM_SHUFFLE(3, 1, 3, 1)); _r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1)); _rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, _r0); _mm512_store_ps(tmpptr + 16, _r1); _mm512_store_ps(tmpptr + 16 * 2, _r2); _mm512_store_ps(tmpptr + 16 * 3, _r3); _mm512_store_ps(tmpptr + 16 * 4, _r4); _mm512_store_ps(tmpptr + 16 * 5, _r5); _mm512_store_ps(tmpptr + 16 * 6, _r6); _mm512_store_ps(tmpptr + 16 * 7, _r7); _mm512_store_ps(tmpptr + 16 * 8, _r8); _mm512_store_ps(tmpptr + 16 * 9, _r9); _mm512_store_ps(tmpptr + 16 * 10, _ra); _mm512_store_ps(tmpptr + 16 * 11, _rb); tmpptr += 192; r0 += bottom_blob_tm.cstep * 16; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x8 __m512 _r0 = _mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); __m512 _r2 = _mm512_load_ps(r0 + 16 * 2); __m512 _r3 = _mm512_load_ps(r0 + 16 * 3); __m512 _r4 = _mm512_load_ps(r0 + 16 * 4); __m512 _r5 = _mm512_load_ps(r0 + 16 * 5); __m512 _r6 = _mm512_load_ps(r0 + 16 * 6); __m512 _r7 = _mm512_load_ps(r0 + 16 * 7); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3); __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3); __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5); __m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5); __m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7); __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7); __m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpc = 
_mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); _tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0)); _tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0)); _tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0)); _tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0)); _tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1)); _tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1)); _tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1)); _tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, _r0); _mm512_store_ps(tmpptr + 16, _r1); _mm512_store_ps(tmpptr + 16 * 2, _r2); _mm512_store_ps(tmpptr + 16 * 3, _r3); _mm512_store_ps(tmpptr + 16 * 4, _r4); _mm512_store_ps(tmpptr + 16 * 5, _r5); _mm512_store_ps(tmpptr + 16 * 6, _r6); _mm512_store_ps(tmpptr + 16 * 7, _r7); tmpptr += 128; r0 += bottom_blob_tm.cstep * 16; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x4 __m512 _r0 = _mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); 
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2); __m512 _r3 = _mm512_load_ps(r0 + 16 * 3); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3); __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3); __m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); _tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, _r0); _mm512_store_ps(tmpptr + 16, _r1); _mm512_store_ps(tmpptr + 16 * 2, _r2); _mm512_store_ps(tmpptr + 16 * 3, _r3); tmpptr += 64; r0 += bottom_blob_tm.cstep * 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x2 __m512 _r0 = _mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); __m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, 
_r0); _mm512_store_ps(tmpptr + 16, _r1); tmpptr += 32; r0 += bottom_blob_tm.cstep * 16; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { __m512 _val = _mm512_load_ps(r0); _mm512_store_ps(tmpptr, _val); tmpptr += 16; r0 += bottom_blob_tm.cstep * 16; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); __m512 _sum2 = _mm512_setzero_ps(); __m512 _sum3 = _mm512_setzero_ps(); __m512 _sum4 = _mm512_setzero_ps(); __m512 _sum5 = _mm512_setzero_ps(); __m512 _sum6 = _mm512_setzero_ps(); __m512 _sum7 = _mm512_setzero_ps(); __m512 _sum8 = _mm512_setzero_ps(); __m512 _sum9 = _mm512_setzero_ps(); __m512 _suma = _mm512_setzero_ps(); __m512 _sumb = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); __m512 _val2 = _mm512_set1_ps(r0[2]); __m512 _val3 = _mm512_set1_ps(r0[3]); _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); __m512 _val4 = _mm512_set1_ps(r0[4]); __m512 _val5 = _mm512_set1_ps(r0[5]); _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5); __m512 _val6 = _mm512_set1_ps(r0[6]); __m512 _val7 = 
_mm512_set1_ps(r0[7]); _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7); __m512 _val8 = _mm512_set1_ps(r0[8]); __m512 _val9 = _mm512_set1_ps(r0[9]); _sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9); __m512 _vala = _mm512_set1_ps(r0[10]); __m512 _valb = _mm512_set1_ps(r0[11]); _suma = _mm512_fmadd_ps(_vala, _w0, _suma); _sumb = _mm512_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); _mm512_store_ps(output0_tm + 16 * 2, _sum2); _mm512_store_ps(output0_tm + 16 * 3, _sum3); _mm512_store_ps(output0_tm + 16 * 4, _sum4); _mm512_store_ps(output0_tm + 16 * 5, _sum5); _mm512_store_ps(output0_tm + 16 * 6, _sum6); _mm512_store_ps(output0_tm + 16 * 7, _sum7); _mm512_store_ps(output0_tm + 16 * 8, _sum8); _mm512_store_ps(output0_tm + 16 * 9, _sum9); _mm512_store_ps(output0_tm + 16 * 10, _suma); _mm512_store_ps(output0_tm + 16 * 11, _sumb); output0_tm += 16 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); __m512 _sum2 = _mm512_setzero_ps(); __m512 _sum3 = _mm512_setzero_ps(); __m512 _sum4 = _mm512_setzero_ps(); __m512 _sum5 = _mm512_setzero_ps(); __m512 _sum6 = _mm512_setzero_ps(); __m512 _sum7 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); __m512 _val2 = _mm512_set1_ps(r0[2]); __m512 _val3 = _mm512_set1_ps(r0[3]); _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); __m512 _val4 = _mm512_set1_ps(r0[4]); __m512 _val5 = _mm512_set1_ps(r0[5]); _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4); _sum5 = 
_mm512_fmadd_ps(_val5, _w0, _sum5); __m512 _val6 = _mm512_set1_ps(r0[6]); __m512 _val7 = _mm512_set1_ps(r0[7]); _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); _mm512_store_ps(output0_tm + 16 * 2, _sum2); _mm512_store_ps(output0_tm + 16 * 3, _sum3); _mm512_store_ps(output0_tm + 16 * 4, _sum4); _mm512_store_ps(output0_tm + 16 * 5, _sum5); _mm512_store_ps(output0_tm + 16 * 6, _sum6); _mm512_store_ps(output0_tm + 16 * 7, _sum7); output0_tm += 16 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); __m512 _sum2 = _mm512_setzero_ps(); __m512 _sum3 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); __m512 _val2 = _mm512_set1_ps(r0[2]); __m512 _val3 = _mm512_set1_ps(r0[3]); _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); _mm512_store_ps(output0_tm + 16 * 2, _sum2); _mm512_store_ps(output0_tm + 16 * 3, _sum3); output0_tm += 16 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, 
_w0, _sum1); r0 += 2; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); output0_tm += 16 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); r0 += 1; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { conv3x3s1_winograd64_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd42_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * 
ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 16b-16a-inch/16a-36-outch/16b kernel_tm_pack4.create(inch / 16, 36, outch / 16, (size_t)4u * 16 * 16, 16 * 16); for (int q = 0; q + 15 < outch; q += 16) { Mat g0 = kernel_tm_pack4.channel(q / 16); for (int k = 0; k < 36; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + 15 < inch; p += 16) { for (int i = 0; i < 16; i++) { for (int j = 0; j < 16; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = k00[k]; g00++; } } } } } } static void conv3x3s1_winograd42_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 4; int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd42_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // 
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x12 __m512 _r0 = _mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); __m512 _r2 = _mm512_load_ps(r0 + 16 * 2); __m512 _r3 = _mm512_load_ps(r0 + 16 * 3); __m512 _r4 = _mm512_load_ps(r0 + 16 * 4); __m512 _r5 = _mm512_load_ps(r0 + 16 * 5); __m512 _r6 = _mm512_load_ps(r0 + 16 * 6); __m512 _r7 = _mm512_load_ps(r0 + 16 * 7); __m512 _r8 = _mm512_load_ps(r0 + 16 * 8); __m512 _r9 = _mm512_load_ps(r0 + 16 * 9); __m512 _ra = _mm512_load_ps(r0 + 16 * 10); __m512 _rb = _mm512_load_ps(r0 + 16 * 11); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3); __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3); __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5); __m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5); __m512 _tmp6 = _mm512_unpacklo_ps(_r6, 
_r7); __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7); __m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9); __m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9); __m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb); __m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb); __m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2)); _tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0)); _tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0)); _tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0)); _tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0)); _tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0)); _tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0)); _tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1)); _tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1)); _tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1)); _tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1)); _tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1)); _tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 
0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0)); _r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0)); _r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1)); _rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, _r0); _mm512_store_ps(tmpptr + 16, _r1); _mm512_store_ps(tmpptr + 16 * 2, _r2); _mm512_store_ps(tmpptr + 16 * 3, _r3); _mm512_store_ps(tmpptr + 16 * 4, _r4); _mm512_store_ps(tmpptr + 16 * 5, _r5); _mm512_store_ps(tmpptr + 16 * 6, _r6); _mm512_store_ps(tmpptr + 16 * 7, _r7); _mm512_store_ps(tmpptr + 16 * 8, _r8); _mm512_store_ps(tmpptr + 16 * 9, _r9); _mm512_store_ps(tmpptr + 16 * 10, _ra); _mm512_store_ps(tmpptr + 16 * 11, _rb); r0 += bottom_blob_tm.cstep * 16; tmpptr += 192; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x8 __m512 _r0 = _mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); __m512 _r2 = _mm512_load_ps(r0 + 16 * 2); __m512 _r3 = _mm512_load_ps(r0 + 16 * 3); __m512 _r4 = _mm512_load_ps(r0 + 16 * 4); __m512 _r5 = _mm512_load_ps(r0 + 16 * 5); __m512 _r6 = _mm512_load_ps(r0 + 16 * 6); __m512 _r7 = _mm512_load_ps(r0 + 16 * 7); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3); __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3); __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5); __m512 
_tmp5 = _mm512_unpackhi_ps(_r4, _r5); __m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7); __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7); __m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); _tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0)); _tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0)); _tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0)); _tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0)); _tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1)); _tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1)); _tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1)); _tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, _r0); _mm512_store_ps(tmpptr + 16, _r1); _mm512_store_ps(tmpptr + 16 * 2, _r2); _mm512_store_ps(tmpptr + 16 * 3, _r3); _mm512_store_ps(tmpptr + 16 * 4, _r4); 
_mm512_store_ps(tmpptr + 16 * 5, _r5); _mm512_store_ps(tmpptr + 16 * 6, _r6); _mm512_store_ps(tmpptr + 16 * 7, _r7); r0 += bottom_blob_tm.cstep * 16; tmpptr += 128; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x4 __m512 _r0 = _mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); __m512 _r2 = _mm512_load_ps(r0 + 16 * 2); __m512 _r3 = _mm512_load_ps(r0 + 16 * 3); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3); __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3); __m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); _tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, _r0); _mm512_store_ps(tmpptr + 16, _r1); _mm512_store_ps(tmpptr + 16 * 2, _r2); _mm512_store_ps(tmpptr + 16 * 3, _r3); r0 += bottom_blob_tm.cstep * 16; tmpptr += 64; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { // transpose 16x2 __m512 _r0 = 
_mm512_load_ps(r0); __m512 _r1 = _mm512_load_ps(r0 + 16); __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1); __m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); __m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_store_ps(tmpptr, _r0); _mm512_store_ps(tmpptr + 16, _r1); r0 += bottom_blob_tm.cstep * 16; tmpptr += 32; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 16; for (int q = 0; q < inch; q++) { __m512 _val = _mm512_load_ps(r0); _mm512_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 16; tmpptr += 16; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); __m512 _sum2 = _mm512_setzero_ps(); __m512 _sum3 = _mm512_setzero_ps(); __m512 _sum4 = _mm512_setzero_ps(); __m512 _sum5 = _mm512_setzero_ps(); __m512 _sum6 = _mm512_setzero_ps(); __m512 _sum7 = _mm512_setzero_ps(); __m512 _sum8 = _mm512_setzero_ps(); __m512 _sum9 = _mm512_setzero_ps(); __m512 _suma = _mm512_setzero_ps(); __m512 _sumb = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 
= _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); __m512 _val2 = _mm512_set1_ps(r0[2]); __m512 _val3 = _mm512_set1_ps(r0[3]); _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); __m512 _val4 = _mm512_set1_ps(r0[4]); __m512 _val5 = _mm512_set1_ps(r0[5]); _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5); __m512 _val6 = _mm512_set1_ps(r0[6]); __m512 _val7 = _mm512_set1_ps(r0[7]); _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7); __m512 _val8 = _mm512_set1_ps(r0[8]); __m512 _val9 = _mm512_set1_ps(r0[9]); _sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9); __m512 _vala = _mm512_set1_ps(r0[10]); __m512 _valb = _mm512_set1_ps(r0[11]); _suma = _mm512_fmadd_ps(_vala, _w0, _suma); _sumb = _mm512_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); _mm512_store_ps(output0_tm + 16 * 2, _sum2); _mm512_store_ps(output0_tm + 16 * 3, _sum3); _mm512_store_ps(output0_tm + 16 * 4, _sum4); _mm512_store_ps(output0_tm + 16 * 5, _sum5); _mm512_store_ps(output0_tm + 16 * 6, _sum6); _mm512_store_ps(output0_tm + 16 * 7, _sum7); _mm512_store_ps(output0_tm + 16 * 8, _sum8); _mm512_store_ps(output0_tm + 16 * 9, _sum9); _mm512_store_ps(output0_tm + 16 * 10, _suma); _mm512_store_ps(output0_tm + 16 * 11, _sumb); output0_tm += 16 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); __m512 _sum2 = _mm512_setzero_ps(); __m512 _sum3 = _mm512_setzero_ps(); __m512 _sum4 = _mm512_setzero_ps(); __m512 _sum5 = _mm512_setzero_ps(); __m512 _sum6 = _mm512_setzero_ps(); __m512 _sum7 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 
_val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); __m512 _val2 = _mm512_set1_ps(r0[2]); __m512 _val3 = _mm512_set1_ps(r0[3]); _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); __m512 _val4 = _mm512_set1_ps(r0[4]); __m512 _val5 = _mm512_set1_ps(r0[5]); _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5); __m512 _val6 = _mm512_set1_ps(r0[6]); __m512 _val7 = _mm512_set1_ps(r0[7]); _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); _mm512_store_ps(output0_tm + 16 * 2, _sum2); _mm512_store_ps(output0_tm + 16 * 3, _sum3); _mm512_store_ps(output0_tm + 16 * 4, _sum4); _mm512_store_ps(output0_tm + 16 * 5, _sum5); _mm512_store_ps(output0_tm + 16 * 6, _sum6); _mm512_store_ps(output0_tm + 16 * 7, _sum7); output0_tm += 16 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); __m512 _sum2 = _mm512_setzero_ps(); __m512 _sum3 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); __m512 _val2 = _mm512_set1_ps(r0[2]); __m512 _val3 = _mm512_set1_ps(r0[3]); _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); _mm512_store_ps(output0_tm + 16 * 2, _sum2); _mm512_store_ps(output0_tm + 16 * 3, _sum3); output0_tm += 16 * 4; } for (; i + 1 < tiles; i += 2) { 
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); __m512 _sum1 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); __m512 _val1 = _mm512_set1_ps(r0[1]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); _mm512_store_ps(output0_tm + 16, _sum1); output0_tm += 16 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row<const float>(r); int nn = inch * 16; // inch always > 0 __m512 _sum0 = _mm512_setzero_ps(); for (int j = 0; j < nn; j++) { __m512 _w0 = _mm512_load_ps(k0); __m512 _val0 = _mm512_set1_ps(r0[0]); _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); r0 += 1; k0 += 16; } _mm512_store_ps(output0_tm, _sum0); output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { conv3x3s1_winograd42_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
* *-----------------------------------------------------------------------*/ # define HLINE "-------------------------------------------------------------\n" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(STREAM_TYPE scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(STREAM_TYPE scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int main() { int quantum, checktick(); int BytesPerWord; int k; ssize_t j; STREAM_TYPE scalar; double t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); #endif printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, 
OFFSET); printf("Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf(" The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif #ifdef _OPENMP k = 0; #pragma omp parallel #pragma omp atomic k++; printf ("Number of Threads counted = %i\n",k); #endif /* Get initial value for system clock. */ #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED 
tuned_STREAM_Copy(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. 
*/ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif void checkSTREAMresults () { STREAM_TYPE aj,bj,cj,scalar; STREAM_TYPE aSumErr,bSumErr,cSumErr; STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; double epsilon; ssize_t j; int k,ierr,err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %d\n",sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr/aj) > epsilon) { err++; printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(a[j]/aj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n",ierr); } if (abs(bAvgErr/bj) > epsilon) { err++; printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" 
Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(b[j]/bj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n",ierr); } if (abs(cAvgErr/cj) > epsilon) { err++; printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(c[j]/cj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n",ierr); } if (err == 0) { printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); } #ifdef VERBOSE printf ("Results Validation Verbose Results: \n"); printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); #endif } #ifdef TUNED /* stubs for "tuned" versions of the kernels */ void tuned_STREAM_Copy() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; } void tuned_STREAM_Scale(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; } 
/* end of stubs for the "tuned" versions of the kernels */ #endif
interpolate_v2_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/pten/core/hostdevice.h" #include "paddle/pten/kernels/funcs/math_function.h" namespace paddle { namespace operators { template <typename T, size_t D, int MajorType = Eigen::RowMajor, typename IndexType = Eigen::DenseIndex> using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>; using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; inline std::vector<int> get_new_shape( const std::vector<const Tensor*>& list_new_shape_tensor) { // get tensor from std::vector<int> vec_new_shape; for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) { auto tensor = list_new_shape_tensor[i]; PADDLE_ENFORCE_EQ(tensor->dims(), pten::make_ddim({1}), platform::errors::InvalidArgument( "The shape of dimension tensor should be [1]," "but received d%.", tensor->dims())); if (platform::is_gpu_place(tensor->place())) { framework::Tensor temp; paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp); vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>())); } else { vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>())); } } return vec_new_shape; } template <typename T> inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) { std::vector<T> vec_new_data; auto* new_data = 
new_data_tensor->data<T>(); framework::Tensor cpu_starts_tensor; if (platform::is_gpu_place(new_data_tensor->place())) { paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor); new_data = cpu_starts_tensor.data<T>(); } #ifdef PADDLE_WITH_ASCEND_CL if (platform::is_npu_place(new_data_tensor->place())) { paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor); new_data = cpu_starts_tensor.data<T>(); } #endif vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel()); return vec_new_data; } inline void ExtractNCDWH(const framework::DDim& dims, const DataLayout& data_layout, int* N, int* C, int* D, int* H, int* W) { *N = dims[0]; if (dims.size() == 3) { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2]; *D = 1; *H = 1; *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; } else if (dims.size() == 4) { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3]; *D = 1; *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2]; } else { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4]; *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2]; *W = data_layout == DataLayout::kNCHW ? dims[4] : dims[3]; } } template <typename T> static void NearestNeighborInterpolate(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolate( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); for (int d = 0; d < out_d; d++) { // loop for images int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, d, k, l) = input_t(i, j, in_d, in_k, in_l); } else { // NDHWC output_t(i, d, k, l, j) = input_t(i, in_d, in_k, in_l, j); } } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? 
static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j 
< out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * 
A; } template <typename T> HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) { T A = -0.75; T x1 = t; coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A); coeffs[1] = cubic_convolution1<T>(x1, A); // opposite coefficients T x2 = 1.0 - t; coeffs[2] = cubic_convolution1<T>(x2, A); coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A); } template <typename T> static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { T coeffs[4]; get_cubic_upsample_coefficients<T>(coeffs, t); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> static void BicubicInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); const T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? 
static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); for (int d = 0; d < out_d; d++) { int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_d, in_k, in_l) += output_grad_t(i, j, d, k, l); } else { input_grad_t(i, in_d, in_k, in_l, j) += output_grad_t(i, d, k, l, j); } } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? 
y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * 
d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = 
framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1.; if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { // float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) 
{ out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = 
ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) 
{ out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The 
scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_w > 0. && scale_h > 0. && scale_d > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolate<T>(input, output, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1.0; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) 
{ out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); pten::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_w = scale_data[0]; scale_h = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, 
platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); pten::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, 
platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); pten::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolateGrad<T>(output_grad, input_grad, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> class InterpolateV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
01_array_sum.c
/* ────────────────────────────────────────────────────────────────────────── * │ │ │ This file is part of the exercises for the Lectures on │ │ "Foundations of High Performance Computing" │ │ given at │ │ Master in HPC and │ │ Master in Data Science and Scientific Computing │ │ @ SISSA, ICTP and University of Trieste │ │ │ │ contact: luca.tornatore@inaf.it │ │ │ │ This is free software; you can redistribute it and/or modify │ │ it under the terms of the GNU General Public License as published by │ │ the Free Software Foundation; either version 3 of the License, or │ │ (at your option) any later version. │ │ This code is distributed in the hope that it will be useful, │ │ but WITHOUT ANY WARRANTY; without even the implied warranty of │ │ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the │ │ GNU General Public License for more details. │ │ │ │ You should have received a copy of the GNU General Public License │ │ along with this program. If not, see <http://www.gnu.org/licenses/> │ │ │ * ────────────────────────────────────────────────────────────────────────── */ /* * COMPILE LINE (icc): -Ofast -fno-alias -xCORE-AVX2 -xHost -fma -use-intel-optimized-headers -falign-loops -qopenmp -parallel -pthread -ipo -vec */ #if defined(__STDC__) #if (__STDC_VERSION__ >= 199901L) #define _XOPEN_SOURCE 700 #endif #endif #define _GNU_SOURCE #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define N_default 100 #if defined(_OPENMP) #define CPU_TIME (clock_gettime(CLOCK_REALTIME, &ts), (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9) #define CPU_TIME_th \ (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &myts), (double)myts.tv_sec + (double)myts.tv_nsec * 1e-9) #else #define CPU_TIME (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts), (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9) #endif int main(int argc, char** argv) { int N = N_default; int nthreads = 1; struct timespec ts; double* array; /* 
----------------------------------------------------------------------------- * initialize * ----------------------------------------------------------------------------- */ // check whether some arg has been passed on if (argc > 1) N = atoi(*(argv + 1)); // allocate memory if ((array = (double*)malloc(N * sizeof(double))) == NULL) { printf("I'm sorry, there is not enough memory to host %lu bytes\n", (unsigned int)N * sizeof(double)); return 1; } // just give notice of what will happen and get the number of threads used #ifndef _OPENMP printf("serial summation\n"); #else #pragma omp parallel { #pragma omp master { nthreads = omp_get_num_threads(); printf("omp summation with %d threads\n", nthreads); } } #endif // initialize the array srand48(time(NULL)); for (int ii = 0; ii < N; ii++) array[ii] = (double)ii; // choose the initialization you prefer; //array[ii] = drand48(); // the first one (with integers) makes it // easy to check the result /* ----------------------------------------------------------------------------- * calculate * ----------------------------------------------------------------------------- */ double S = 0; // this will store the summation double th_avg_time = 0; // this will be the average thread runtime double th_min_time = 1e11; // this will be the min thread runtime. // contrasting the average and the min // time taken by the threads, you may // have an idea of the unbalance. double tstart = CPU_TIME; #if !defined(_OPENMP) for (int ii = 0; ii < N; ii++) // well, you may notice this implementation S += array[ii]; // is particularly inefficient anyway #else #pragma omp parallel reduction(+:th_avg_time) \ reduction(min:th_min_time) // in this region there are 2 different { // reductions: the one of runtime, which struct timespec myts; // happens in the whole parallel region; double mystart = CPU_TIME_th; // and the one on S, which takes place #pragma omp for reduction(+ : S) // in the for loop. 
for (int ii = 0; ii < N; ii++) S += array[ii]; double mytime = CPU_TIME_th - mystart; th_avg_time += mytime; th_min_time = (mytime < th_min_time) ? mytime : th_min_time; } #endif double tend = CPU_TIME; // this timer is CLOCK_REALTIME if OpenMP // is active; CLOCK_PROCESS_CPU_TIME_ID // otherwise. That is because the latter // would accounts for the whole cpu time // used by the threads under OpenMP. /* ----------------------------------------------------------------------------- * finalize * ----------------------------------------------------------------------------- */ // printf("\nSum is %g, process took <%g> of wall-clock time\n" // "<%g> sec of avg thread-time\n" // "<%g> sec of min thread-time\n", // S, tend - tstart, th_avg_time / nthreads, th_min_time); // printf("%g SUM\n\n\n" // "%g WALL\n" // "%g THAVG\n" // "%g THMIN\n", // S, tend - tstart, th_avg_time / nthreads, th_min_time); printf("%g SUM\n\n\n" "%g\n" // Wall time "%g\n" // Average thread time (single thread avg) "%g\n", // Min thread time (single thread min) S, tend - tstart, th_avg_time / nthreads, th_min_time); free(array); return 0; }
bd_serial.c
#include <stdlib.h> #include <stdio.h> #include <unistd.h> // access #include <math.h> #include <assert.h> #include "timer.h" #include "bd.h" #include <omp.h> #include <mkl.h> #include <cilk/cilk.h> #include <cilk/cilk_api.h> #define NTHREADS 24 #define M_PI 3.14159265358979323846 #define my_EPS 0.000000001 void print_matrix(double *a, int n){ for(int i=0;i<5;i++){ for(int j=0;j<n;j++){ printf("%lf ", a[i*n+j]); } printf("\n\n"); } return; } void print_array(double *a, int n){ for(int i=0;i<n;i++){ printf("%lf ", a[i]); } printf("\n"); return; } //****************************** RPY_EWALD part ***************************************************** inline void scalar_rpy_ewald_real(double r, double xi, double a3, double *m11, double *m12) { double a = 1.; double xi2 = xi*xi; double xi3 = xi2*xi; double xi5 = xi3*xi2; double xi7 = xi5*xi2; double r2 = r*r; double r4 = r2*r2; double ri = 1./r; double ri2 = ri*ri; double ri3 = ri*ri2; double erfc_xi_r = erfc(xi*r); double pi_exp = 1./sqrt(M_PI) * exp(-xi2*r2); *m11 = (0.75*a*ri + 0.5*a3*ri3)*erfc_xi_r + ( 4*xi7*a3*r4 + 3*xi3*a*r2 - 20*xi5*a3*r2 - 4.5*xi*a + 14*xi3*a3 + xi*a3*ri2)*pi_exp; *m12 = (0.75*a*ri - 1.5*a3*ri3)*erfc_xi_r + (-4*xi7*a3*r4 - 3*xi3*a*r2 + 16*xi5*a3*r2 + 1.5*xi*a - 2*xi3*a3 - 3*xi*a3*ri2)*pi_exp; } inline void scalar_rpy_ewald_recip(double k, double xi, double *m2) { double a = 1.; double a3 = 1.; double k2 = k*k; double xii2k2 = k2/(xi*xi); *m2 = (1. 
+ 0.25*xii2k2 + 0.125*xii2k2*xii2k2) * 6.*M_PI/k2 * exp(-0.25*xii2k2); } // note: positions must be wrapped inside the box [0,L] int rpy_ewald(int np, double * restrict a, const double * restrict pos, double L, const double * restrict rad, double xi, int nr, int nk) { // printf("Inside function rpy_ewald\n"); __declspec(align(64)) double rvec[8]; __declspec(align(64)) double rvec0[8]; __declspec(align(64)) double temp[8]; double a3; double m11, m12, m2; double eye3_coef; double r2, r; int x, y, z; int i, j; double *ap0, *ap; int vsize = ((2*nk+1)*(2*nk+1)*(2*nk+1) - 1) / 2; #define VSIZE ((2*6+1)*(2*6+1)*(2*6+1) - 1) / 2 // int A_VSIZE = ceil(VSIZE/8.0)*8; // int K_VSIZE = ceil(3*VSIZE/8.0)*8; // printf("check vsize=%d\n", A_VSIZE); __declspec(align(64)) double k_array[VSIZE];//1104 __declspec(align(64)) double m2_array[VSIZE];//1104 __declspec(align(64)) double kvec_array[3*VSIZE];//3296 int ind; __declspec(align(64)) double kvec[8]; double k; double t; double vinv = 1./(L*L*L); double time0, time1; double time0_real, time1_real; double time0_recip, time1_recip; // INDICES for converting for loops int _b, _index, ib, ib2; // ************************************************************************* // // compute and save coefficients for reciprocal-space sum // // Due to symmetry, only need half of the grid points ind = 0; _b = (2*nk+1); for (_index =0 ;_index < (_b*_b*_b -1)/2; _index++){// Using indices x,y,z are recalculated z = _index%(_b)-nk;// adjusting the indices x = (_index-_index%(_b*_b))/(_b*_b)-nk; y = (_index%(_b*_b)-_index%(_b))/_b-nk; k_array[ind] = 2.*M_PI/L*sqrt((double)(x*x + y*y + z*z)); scalar_rpy_ewald_recip(k_array[ind], xi, &m2_array[ind]); kvec_array[3*ind ] = 2.*M_PI/L*x; kvec_array[3*ind+1] = 2.*M_PI/L*y; kvec_array[3*ind+2] = 2.*M_PI/L*z; ind++; } // #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, ap, ap0, _b, temp, eye3_coef, _index, rvec0, rvec, x, y, z, r, r2, m11, m12, a3 ) for (int _index1 = 
np*(np-1)/2-1; _index1>=0; _index1--){ i = np-1-(int)((1+sqrt(8*_index1+1))/2); j = np-1-_index1 + (int)((1+sqrt(8*_index1+1))/2)*((int)((1+sqrt(8*_index1+1))/2)-1)/2; temp[0] = 0.; temp[1] = 0.; temp[3] = 0.; temp[2] = 0.; temp[4] = 0.; temp[5] = 0.; eye3_coef = 0.; rvec0[0] = pos[3*i] - pos[3*j]; rvec0[1] = pos[3*i+1] - pos[3*j+1]; rvec0[2] = pos[3*i+2] - pos[3*j+2]; a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]); _b = (2*nr+1); //shared(eye3_coef, temp, rvec0, L, xi, a3, m11, m12, _b, xi3, xi5, xi7, xi) ////// #pragma omp parallel for schedule(static) private(rvec, x, y, z, r, r2, m11, m12) shared(eye3_coef, temp, rvec0, a3) for (_index =0 ;_index < _b*_b*_b; _index++){ z =_index%(_b)-nr;// adjusting the indices x = (_index-_index%(_b*_b))/(_b*_b)-nr; y = (_index%(_b*_b)-_index%(_b))/_b-nr; rvec[0] = rvec0[0] + x*L; rvec[1] = rvec0[1] + y*L; rvec[2] = rvec0[2] + z*L; // compute norm r2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2]; r = sqrt(r2); rvec[0] /= r; rvec[1] /= r; rvec[2] /= r; scalar_rpy_ewald_real(r, xi, a3, &m11, &m12); eye3_coef += m11; temp[0] += m12 * rvec[0] * rvec[0]; temp[1] += m12 * rvec[0] * rvec[1]; temp[2] += m12 * rvec[0] * rvec[2]; temp[3] += m12 * rvec[1] * rvec[1]; temp[4] += m12 * rvec[1] * rvec[2]; temp[5] += m12 * rvec[2] * rvec[2]; } // add contribution to eye3 term temp[0] += eye3_coef; temp[3] += eye3_coef; temp[5] += eye3_coef; // sum into global matrix (only lower-triangular part) // // Use matlab to add transpose ap0 = &a[np*3*3*i + 3*j]; ap = ap0; *ap++ = temp[0]; *ap++ = temp[1]; *ap = temp[2]; ap = ap0+np*3; *ap++ = temp[1]; *ap++ = temp[3]; *ap = temp[4]; ap = ap0+np*3+np*3; *ap++ = temp[2]; *ap++ = temp[4]; *ap = temp[5]; } // reciprocal-space sum // #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, temp, ap, ap0, ind, rvec, kvec, k, m2, t, a3) for (_index = np*(np+1)/2-1; _index>=0; _index--){ i = np-1-(int)((-1+sqrt(8*_index+1))/2); j = np-1-_index + 
(int)((-1+sqrt(8*_index+1))/2)*((int)((-1+sqrt(8*_index+1))/2)+1)/2; rvec[0] = pos[3*i+0] - pos[3*j]; rvec[1] = pos[3*i+1] - pos[3*j+1]; rvec[2] = pos[3*i+2] - pos[3*j+2]; temp[0] = 0.; temp[1] = 0.; temp[3] = 0.; temp[2] = 0.; temp[4] = 0.; temp[5] = 0.; a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]); for (ind=0; ind<vsize; ind++) { k = k_array[ind]; m2 = m2_array[ind]; kvec[0] = kvec_array[3*ind ]; kvec[1] = kvec_array[3*ind+1]; kvec[2] = kvec_array[3*ind+2]; t = 2.*vinv*m2*cos(kvec[0]*rvec[0] + kvec[1]*rvec[1] + kvec[2]*rvec[2])*(1.-a3*k*k/3.); kvec[0] /= k; kvec[1] /= k; kvec[2] /= k; temp[0] += t * (1. - kvec[0]*kvec[0]); temp[1] += t * - kvec[0]*kvec[1]; temp[2] += t * - kvec[0]*kvec[2]; temp[3] += t * (1. - kvec[1]*kvec[1]); temp[4] += t * - kvec[1]*kvec[2]; temp[5] += t * (1. - kvec[2]*kvec[2]); } // sum into matrix // // sum with existing values ap0 = &a[np*3*3*i + 3*j]; ap = ap0; *ap++ += temp[0]; *ap++ += temp[1]; *ap += temp[2]; ap = ap0+np*3; *ap++ += temp[1]; *ap++ += temp[3];// diagonal element *ap += temp[4]; ap = ap0+np*3+np*3; *ap++ += temp[2]; *ap++ += temp[4]; *ap += temp[5];// diagonal element } // self-part for (i=0; i<np; i++)// adding some term to diagonal { t = 1./rad[i] - (6. 
- 40./3.*xi*xi*rad[i]*rad[i])*xi/sqrt(M_PI); t *= 0.5; for (j=0; j<3; j++) { ind = 3*i+j; a[ind*np*3+ind] = a[ind*np*3+ind]*0.5+t;// taking care of (i==j) condition } } return 0; } //************************************************************************************************** void get_indices(int index, int *i, int *j, int *k, int b){ int ib, ib2; ib = index%(b); ib2 = index%(b*b); *k = ib; *i = (index-ib2)/(b*b); *j = (ib2-*k)/b; return; } struct box { int head; }; // it is possible to use smaller boxes and more complex neighbor patterns #define NUM_BOX_NEIGHBORS 14 int box_neighbors[NUM_BOX_NEIGHBORS][3] = { {-1,-1,-1}, {-1,-1, 0}, {-1,-1,+1}, {-1, 0,-1}, {-1, 0, 0}, {-1, 0,+1}, {-1,+1,-1}, {-1,+1, 0}, {-1,+1,+1}, { 0,-1,-1}, { 0,-1, 0}, { 0,-1,+1}, { 0, 0,-1}, { 0, 0, 0} // will calculate within the box interactions }; /* // CHECK RPY************* int gold_read(const char *filename, int npos, double *gold) { int npos_read; FILE *fp = fopen(filename, "r"); assert(fp); fscanf(fp, "%d\n", &npos_read); char label[100]; fgets(label, 100, fp); assert(npos == npos_read); for (int i=0; i<3*npos; i++) { for (int j=0; j<3*npos; j++) { fscanf(fp, "%lf\n", &gold[i*(3*npos) + j]); } } fclose(fp); return 0; } double compare_gold(int npos, double *a,double *gold) { double err = 0.0; printf("a = %lf\n", a[3]); printf("gold = %lf\n", gold[3]); for (int i=0; i<npos; i++) { for (int j=0; j<npos; j++) { double diff = a[i*(npos*3) + j] - gold[i*(npos*3) +j]; err += diff*diff; // if(err>0){printf("error at position: i=%d j=%d and err = %lf\n", i, j, err);} // printf("error at position: i=%d j=%d and err = %lf\n", i, j, err); } } return err; } // ********************** */ int bd(int npos, double * restrict pos_orig, double * restrict buf, const int *types, double L, double * restrict pos, int* restrict next, double* restrict forces, double f_const, double * restrict au, double * restrict rad, double xi, int nr, int nk, double * restrict hd_vec) { // __ilkrts_set_param("nworkers", 
NTHREADS); // __cilkrts_set_param("nworkers", "24"); /* //************************** CHECK RPY part *************************************************** printf("npos = %d, L= %lf\n", npos, L); char *gold_filename = "gold.dat"; double *gold = (double *) _mm_malloc((3*npos) * (3*npos) * sizeof(double), 64); if (access(gold_filename, F_OK) == -1) { printf("[WARNING] Unable to access gold file \"%s\"; comparison will not proceed.\n", gold_filename); } else { gold_read(gold_filename, npos, gold); } rpy_ewald(npos, au, pos_orig, L, rad, xi, nr, nk);// DELETE after testing double error = compare_gold(npos, au, gold); printf("Squared Error: %f\n", error); return 500; //********************************************************************************************* */ /* // generate random values from standard normal distribution // note: this MKL function is sequential but vectorized vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.); // printf("Calculating the Hydrodynamic Interations for the given particle positions\n"); // au = upper triangular matrix with hydrodynamic interaction values // pos = wrapped up position inside the box_width = L; // rad = radius of particles; xi, nr, nk are constants. rpy_ewald(npos, au, pos, L, rad, xi, nr, nk); print_matrix(au, 3*npos); printf("Getting the cholesky decomposition\n"); LAPACKE_dpotrf(LAPACK_ROW_MAJOR, 'U', 3*npos, au, 3*npos); // Get interations vector by multiplying l_cols by buf) print_matrix(au, 3*npos); // print_matrix(au, 3*npos); printf("Multiplying by random gaussian vector \n"); cblas_dgemm(CblasRowMajor, CblasTrans, CblasTrans, 3*npos, 1, 3*npos, 1, au, 3*npos, buf, 3*npos, 0, hd_vec, 1); print_array(buf, 3*npos); printf("printing the correlation vector\n"); print_array(hd_vec, 3*npos); */ // Initialisations required for INTERACTION FUNCTION******** NOTE: Can take input to bd itself!!! 
double krepul = 100, a=1, a_sq, phi=0.2, f; a_sq = a*a; int boxdim;// boxdim is number of cells in L double cutoff2; int numpairs_p; cutoff2 = 4;// cutoff < L/boxdim boxdim =(int)(L/cutoff2)*a;//(int)(L/cutoff2*0.8); printf("L = %lf cutoff2 = %lf boxdim = %d\n", L, cutoff2, boxdim); struct box b[boxdim][boxdim][boxdim]; struct box *bp; struct box *neigh_bp; // box indices int idx, idy, idz, index, box2, ib2; int neigh_idx, neigh_idy, neigh_idz; // allocate implied linked list int p1, p2, j, i; double d2, dx, dy, dz, s; box2 = boxdim*boxdim; //*****************************************END initialisations*********************************** if (boxdim < 4 || cutoff2 > (L/boxdim)*(L/boxdim)) { printf("interactions: bad input parameters\n"); // return 1; } double t0, t_init_cells = 0, t_assign_to_cells=0, t_update_pos=0, t_force=0, t_hd = 0, t_cho = 0; for (int step=0; step<INTERVAL_LEN; step++) { // printf("step = %d\n", step); // Calculation of interaction per time step t0 = time_in_seconds(); // allocate memory for particles in each box // #pragma omp parallel for schedule(static) private(idx, idy, idz, ib2) shared(b, boxdim, box2) // for (index=0; index<boxdim*box2; index++){ // idz = index%(boxdim); // ib2 = index%(box2); // idx = (index-ib2)/(box2); // idy = (ib2-idz)/boxdim; // b[idx][idy][idz].head=-1; // } for (idx=0; idx<boxdim; idx++){ for (idy=0; idy<boxdim; idy++){ for (idz=0; idz<boxdim; idz++){ b[idx][idy][idz].head=-1; } } } t_init_cells += time_in_seconds()-t0; t0 = time_in_seconds(); // traverse all particles and assign to boxes // #pragma omp parallel for schedule(static) private(i, idx, idy, idz, bp) shared(b, next) num_threads(NTHREADS) for (i=0; i<npos; i++) { if (pos_orig[3*i] >= 0){pos[3*i]= fmod(pos_orig[3*i], L);}// OR SINCE PARTICLES moving slowly.. change to -L else {// pos_orig[i] is negative pos[3*i] = L-fmod(-1*pos_orig[3*i], L); } if (pos_orig[3*i+1] >= 0){pos[3*i+1]= fmod(pos_orig[3*i+1], L);}// OR SINCE PARTICLES moving slowly.. 
change to -L else {// pos_orig[i] is negative pos[3*i+1] = L-fmod(-1*pos_orig[3*i+1], L); } if (pos_orig[3*i+2] >= 0){pos[3*i+2]= fmod(pos_orig[3*i+2], L);}// OR SINCE PARTICLES moving slowly.. change to -L else {// pos_orig[i] is negative pos[3*i+2] = L-fmod(-1*pos_orig[3*i+2], L); } if (pos[3*i]<0){printf("pos_orig = %lf pos defect = %lf and i = %d and L =%lf\n", pos_orig[3*i], pos[3*i], i, L);} // initialize entry of implied linked list next[i] = -1; forces[3*i+0] = 0; forces[3*i+1] = 0; forces[3*i+2] = 0; // re-initialising interaction forces at each time step // which box does the particle belong to? // assumes particles have positions within [0,L]^3 idx = (int)(pos[3*i ]/L*boxdim); idy = (int)(pos[3*i+1]/L*boxdim); idz = (int)(pos[3*i+2]/L*boxdim); // add to beginning of implied linked list bp = &b[idx][idy][idz]; // next[i] = bp->head; // next = previous (my notation) // #pragma omp critical // { next[i] = bp->head; // next = previous (my notation) bp->head = i; // head = latest (my notation) // } } t_assign_to_cells += time_in_seconds()-t0; t0 = time_in_seconds(); // #pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f, idx, idy, idz, ib2, bp) shared(b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, box2) num_threads(NTHREADS) for (index=0; index<boxdim*box2; index++){ idz = index%(boxdim); ib2 = index%(box2); idx = (index-ib2)/(box2); idy = (ib2-idz)/boxdim; bp = &b[idx][idy][idz]; // interactions within and other boxes // #pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f) shared(bp, b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, idx, idy, idz)// num_threads(NTHREADS) for (j=0; j<NUM_BOX_NEIGHBORS; j++) { neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim; neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim; neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim; 
neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz]; // when using boxes, the minimum image computation is // known beforehand, thus we can compute position offsets // to compensate for wraparound when computing distances double xoffset = 0.; double yoffset = 0.; double zoffset = 0.; if (idx + box_neighbors[j][0] == -1) xoffset = -L; if (idy + box_neighbors[j][1] == -1) yoffset = -L; if (idz + box_neighbors[j][2] == -1) zoffset = -L; if (idx + box_neighbors[j][0] == boxdim) xoffset = L; if (idy + box_neighbors[j][1] == boxdim) yoffset = L; if (idz + box_neighbors[j][2] == boxdim) zoffset = L; // NOTE: modifying the function to update the forces p1 = neigh_bp->head; while (p1 != -1) { p2 = bp->head; while (p2 != -1) { // compute distance vector dx = pos[3*p1+0] - pos[3*p2+0] + xoffset; dy = pos[3*p1+1] - pos[3*p2+1] + yoffset; dz = pos[3*p1+2] - pos[3*p2+2] + zoffset; d2 = dx*dx+dy*dy+dz*dz+my_EPS; if ( d2<4.0*a_sq) { s = sqrt(d2); f = krepul*(2*a-s); // #pragma omp atomic forces[3*p1+0] += f*dx/s; // #pragma omp atomic forces[3*p1+1] += f*dy/s; // #pragma omp atomic forces[3*p1+2] += f*dz/s; // #pragma omp atomic forces[3*p2+0] -= f*dx/s; // #pragma omp atomic forces[3*p2+1] -= f*dy/s; // #pragma omp atomic forces[3*p2+2] -= f*dz/s; } p2 = next[p2]; } p1 = next[p1]; } } } t_force += time_in_seconds() - t0; t0 = time_in_seconds(); // printf("Calculating the Hydrodynamic Interations for the given particle positions\n"); // au = upper triangular matrix with hydrodynamic interaction values // pos = wrapped up position inside the box_width = L; // rad = radius of particles; xi, nr, nk are constants. 
for (int p1=0; p1<3*npos*3*npos; p1++){ au[p1] = 0; } rpy_ewald(npos, au, pos, L, rad, xi, nr, nk); t_hd += time_in_seconds() - t0; // print_matrix(au, 3*npos); // printf("Getting the cholesky decomposition\n"); t0 = time_in_seconds(); LAPACKE_dpotrf(LAPACK_ROW_MAJOR, 'U', 3*npos, au, 3*npos); t_cho += time_in_seconds() - t0; // Get interations vector by multiplying l_cols by buf) // print_matrix(au, 3*npos); // print_matrix(au, 3*npos); // printf("Multiplying by random gaussian vector \n"); t0 = time_in_seconds(); // generate random values from standard normal distribution // note: this MKL function is sequential but vectorized vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.); cblas_dgemm(CblasRowMajor, CblasTrans, CblasTrans, 3*npos, 1, 3*npos, 1, au, 3*npos, buf, 3*npos, 0, hd_vec, 1); // print_array(buf, 3*npos); // printf("printing the correlation vector\n"); // print_array(hd_vec, 3*npos); // update positions with Brownian displacements // #pragma omp parallel for schedule(static) shared(pos_orig) private(i) num_threads(NTHREADS) for (int i=0; i<3*npos; i++) { // pos_orig[i] += forces[i]*DELTAT+f_const*buf[i]; pos_orig[i] += forces[i]*DELTAT+f_const*hd_vec[i]; } t_update_pos += time_in_seconds() - t0; } printf("--------------------------------------------------------\n"); printf("Time: %f for initiating the cell head \n", t_init_cells); printf("Time: %f for assigning particles to cells \n", t_assign_to_cells); printf("Time: %f for force calculations \n", t_force); printf("Time: %f for hydrodynamic \n", t_hd); printf("Time: %f for cholesky \n", t_cho); printf("Time: %f for pos update \n", t_update_pos); printf("--------------------------------------------------------\n"); return 0; }
pr58809.c
/* PR middle-end/58809 */ /* { dg-do compile } */ /* { dg-options "-fopenmp" } */ _Complex int j; _Complex double d; void foo (void) { #pragma omp parallel reduction (&:j) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (|:j) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (^:j) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (min:j) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (max:j) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (&:d) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (|:d) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (^:d) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (min:d) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; #pragma omp parallel reduction (max:d) /* { dg-error "has invalid type for|user defined reduction not found for" } */ ; }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
viter.c
/* © 2011-2015 by Kornel Lesiński. This file is part of libimagequant. libimagequant is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. libimagequant is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with libimagequant. If not, see <http://www.gnu.org/licenses/>. */ #include "libimagequant.h" #include "pam.h" #include "viter.h" #include "nearest.h" #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif /* * Voronoi iteration: new palette color is computed from weighted average of colors that map to that palette entry. 
*/

/* Zero the per-thread accumulator array before an iteration.  The array
 * holds map->colors entries per thread plus VITER_CACHE_LINE_GAP padding
 * entries per thread slice (presumably to avoid false sharing between
 * threads -- see viter_update_color's indexing). */
LIQ_PRIVATE void viter_init(const colormap *map, const unsigned int max_threads, viter_state average_color[])
{
    memset(average_color, 0, sizeof(average_color[0])*(VITER_CACHE_LINE_GAP+map->colors)*max_threads);
}

/* Accumulate one weighted pixel into the running average for palette entry
 * 'match', inside the calling thread's private slice of average_color. */
LIQ_PRIVATE void viter_update_color(const f_pixel acolor, const float value, const colormap *map, unsigned int match, const unsigned int thread, viter_state average_color[])
{
    /* Offset into this thread's slice; each slice is colors + gap entries. */
    match += thread * (VITER_CACHE_LINE_GAP+map->colors);
    average_color[match].a += acolor.a * value;
    average_color[match].r += acolor.r * value;
    average_color[match].g += acolor.g * value;
    average_color[match].b += acolor.b * value;
    average_color[match].total += value;
}

/* Merge all threads' accumulators and write the new palette colors:
 * each non-fixed entry becomes the weighted average of the pixels that
 * mapped to it.  Entries that attracted no pixels keep their color and get
 * a tiny index-dependent popularity instead. */
LIQ_PRIVATE void viter_finalize(colormap *map, const unsigned int max_threads, const viter_state average_color[])
{
    for (unsigned int i=0; i < map->colors; i++) {
        double a=0, r=0, g=0, b=0, total=0;

        // Aggregate results from all threads
        for(unsigned int t=0; t < max_threads; t++) {
            const unsigned int offset = (VITER_CACHE_LINE_GAP+map->colors) * t + i;

            a += average_color[offset].a;
            r += average_color[offset].r;
            g += average_color[offset].g;
            b += average_color[offset].b;
            total += average_color[offset].total;
        }

        if (total && !map->palette[i].fixed) {
            map->palette[i].acolor = (f_pixel){
                .a = a / total,
                .r = r / total,
                .g = g / total,
                .b = b / total,
            };
        } else {
            /* Unused entry: synthesize a small, index-dependent popularity
             * so later sorting stays deterministic. */
            total = i/1024.0;
        }
        map->palette[i].popularity = total;
    }
}

/* One Voronoi iteration: map every histogram entry to its nearest palette
 * color (parallelized with OpenMP for large histograms), accumulate
 * per-thread weighted averages, then rebuild the palette from them.
 * Returns the average perceptual error of the mapping.
 * NOTE(review): average_color is a VLA sized by map->colors * thread count
 * on the stack -- assumed small enough in practice; confirm upstream. */
LIQ_PRIVATE double viter_do_iteration(histogram *hist, colormap *const map, viter_callback callback, const bool fast_palette)
{
    const unsigned int max_threads = omp_get_max_threads();
    viter_state average_color[(VITER_CACHE_LINE_GAP+map->colors) * max_threads];
    viter_init(map, max_threads, average_color);
    struct nearest_map *const n = nearest_init(map, fast_palette);
    hist_item *const achv = hist->achv;
    const int hist_size = hist->size;

    double total_diff=0;

    #pragma omp parallel for if (hist_size > 3000) \
        schedule(static) default(none) shared(average_color,callback) reduction(+:total_diff)
    for(int j=0; j < hist_size; j++) {
        float diff;
        /* Seed the search with the entry's last match to speed it up. */
        unsigned int match = nearest_search(n, &achv[j].acolor, achv[j].tmp.likely_colormap_index, &diff);
        achv[j].tmp.likely_colormap_index = match;
        total_diff += diff * achv[j].perceptual_weight;

        viter_update_color(achv[j].acolor, achv[j].perceptual_weight, map, match, omp_get_thread_num(), average_color);

        if (callback) callback(&achv[j], diff);
    }

    nearest_free(n);
    viter_finalize(map, max_threads, average_color);

    return total_diff / hist->total_perceptual_weight;
}
HEAT_MPI_OPENMP.c
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"
#define NXPROB      1008               /* x dimension of problem grid */
#define NYPROB      1008               /* y dimension of problem grid */
#define STEPS       100                /* number of time steps */
#define MAXWORKER   16                 /* maximum number of worker tasks */
#define MINWORKER   1                  /* minimum number of worker tasks */
#define BEGIN       1                  /* message tag */
#define LTAG        2                  /* message tag */
#define RTAG        3                  /* message tag */
#define UTAG        4                  /* message tag */
#define DTAG        5                  /* message tag */
#define NONE        -1                 /* indicates no neighbor */
#define DONE        10                 /* message tag */
#define MASTER      0                  /* taskid of first process */
#define OPENMPTHR   4
#include <omp.h>

/* Heat-equation update coefficients for the x and y directions. */
struct Parms {
    float cx;
    float cy;
} parms = {0.1, 0.1};

/*
 * 2D heat diffusion on an NXPROB x NYPROB grid, decomposed into a
 * square Cartesian grid of MPI processes.  Halo exchange uses persistent
 * requests; the interior is updated while halos are in flight, then the
 * four borders are updated.  Each rank runs OPENMPTHR OpenMP threads.
 */
int main(int argc, char *argv[])
{
    void inidat(), prtdat(), updateinner(), updateouter();
    int taskid, square_root,          /* this task's unique id / process-grid side */
        numtasks,                     /* number of tasks */
        blocks_s,                     /* local block side length */
        left, right, up, down,        /* neighbor tasks */
        i, j, iz, it;                 /* loop variables; iz = active buffer */
    MPI_Datatype sendtype_col, sendtype_row, sendarray, receivearray;
    MPI_Comm comm;
    MPI_Request sendrequests[4], receiverequests[4], finalsend;
    double starttime, endtime;
    /* NOTE(review): ~4 MB automatic array; assumes a large stack limit. */
    float u[NXPROB][NYPROB];
    int curr_task;
    int offsetx = 0;
    int offsety = 0;

    MPI_Init(&argc, &argv);
    omp_set_dynamic(0);
    omp_set_num_threads(OPENMPTHR);
    starttime = MPI_Wtime();
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);

    /* numtasks is assumed to be a perfect square (1..49). */
    for (square_root = 1; square_root <= 7; square_root++)
        if (square_root * square_root == numtasks)
            break;
    MPI_Request finalsends[numtasks];
    int dim[2] = {square_root, square_root};
    int period[2] = {0, 0};
    MPI_Cart_create(MPI_COMM_WORLD, 2, dim, period, 1, &comm);
    int num_of_blocks = NYPROB / square_root;
    blocks_s = NYPROB / square_root;

    /* Column type strides over rows of the (blocks_s+2)^2 local block;
     * row type is contiguous. */
    MPI_Type_vector(blocks_s + 2, 1, blocks_s + 2, MPI_FLOAT, &sendtype_col);
    MPI_Type_commit(&sendtype_col);
    MPI_Type_vector(blocks_s + 2, 1, 1, MPI_FLOAT, &sendtype_row);
    MPI_Type_commit(&sendtype_row);
    int big_size[2] = {blocks_s + 2, blocks_s + 2};
    int small_size[2] = {blocks_s, blocks_s};
    int grid_size[2] = {NXPROB, NYPROB};
    int new_arrstart[2] = {1, 1};
    /* Interior of the halo-padded local block. */
    MPI_Type_create_subarray(2, big_size, small_size, new_arrstart, MPI_ORDER_C, MPI_FLOAT, &sendarray);
    MPI_Type_commit(&sendarray);
    /* Receive-side subarray of the global grid; start offset {0,0} reuses
     * 'period', which holds {0,0}.
     * FIX: the element type was MPI_INT although the data is float. */
    MPI_Type_create_subarray(2, grid_size, small_size, period, MPI_ORDER_C, MPI_FLOAT, &receivearray);
    MPI_Type_commit(&receivearray);

    /* Double-buffered local block with one-cell halo on every side. */
    float localu[2][blocks_s + 2][blocks_s + 2];
    for (i = 0; i <= blocks_s + 1; i++)
        for (j = 0; j <= blocks_s + 1; j++) {
            localu[0][i][j] = (float)(i * (blocks_s - i - 1) * j * (blocks_s - j - 1));
            localu[1][i][j] = (float)(i * (blocks_s - i - 1) * j * (blocks_s - j - 1));
        }

    iz = 0;
    MPI_Cart_shift(comm, 1, 1, &left, &right);
    MPI_Cart_shift(comm, 0, 1, &up, &down);

    /* Persistent halo-exchange requests.
     * NOTE(review): these are bound to localu[0] only, but iz flips every
     * step, so plane 1's halos are never exchanged; also the neighbor ranks
     * come from 'comm' (created with reorder=1) while the communication
     * uses MPI_COMM_WORLD -- both should be confirmed against the original
     * intent before restructuring. */
    MPI_Send_init(&localu[iz][1][0],        1, sendtype_row, up,    DTAG, MPI_COMM_WORLD, &sendrequests[0]);
    MPI_Send_init(&localu[iz][blocks_s][0], 1, sendtype_row, down,  UTAG, MPI_COMM_WORLD, &sendrequests[1]);
    MPI_Send_init(&localu[iz][0][1],        1, sendtype_col, left,  LTAG, MPI_COMM_WORLD, &sendrequests[2]);
    MPI_Send_init(&localu[iz][0][blocks_s], 1, sendtype_col, right, RTAG, MPI_COMM_WORLD, &sendrequests[3]);
    MPI_Recv_init(&localu[iz][0][0],            1, sendtype_row, up,    UTAG, MPI_COMM_WORLD, &receiverequests[0]);
    MPI_Recv_init(&localu[iz][blocks_s + 1][0], 1, sendtype_row, down,  DTAG, MPI_COMM_WORLD, &receiverequests[1]);
    MPI_Recv_init(&localu[iz][0][0],            1, sendtype_col, left,  RTAG, MPI_COMM_WORLD, &receiverequests[2]);
    MPI_Recv_init(&localu[iz][0][blocks_s + 1], 1, sendtype_col, right, LTAG, MPI_COMM_WORLD, &receiverequests[3]);

    /* Main time-stepping loop: overlap interior update with halo exchange. */
    for (it = 1; it <= STEPS; it++) {
        MPI_Startall(4, sendrequests);
        MPI_Startall(4, receiverequests);
        updateinner(blocks_s, blocks_s + 2, &localu[iz][0][0], &localu[1 - iz][0][0]);
        MPI_Waitall(4, receiverequests, MPI_STATUSES_IGNORE);
        updateouter(up, down, left, right, blocks_s, blocks_s + 2, taskid, num_of_blocks,
                    &localu[iz][0][0], &localu[1 - iz][0][0]);
        MPI_Waitall(4, sendrequests, MPI_STATUSES_IGNORE);
        iz = 1 - iz;
    }

    if (numtasks == 1) {
        MPI_Type_free(&sendarray);
        MPI_Type_free(&sendtype_col);
        MPI_Type_free(&sendtype_row);
        MPI_Type_free(&receivearray);
        endtime = MPI_Wtime();
        printf("That took %f seconds\n", endtime - starttime);
        MPI_Finalize();
        return 0;   /* FIX: was a bare 'return;' in int main */
    }

    /* Every rank sends its interior block to the master, which assembles
     * the global grid block-row by block-row. */
    MPI_Isend(&(localu[iz][0][0]), 1, sendarray, 0, DONE, MPI_COMM_WORLD, &finalsend);
    if (taskid == MASTER) {
        for (curr_task = 0; curr_task < numtasks; curr_task++) {
            MPI_Irecv(&(u[offsetx][offsety]), 1, receivearray, curr_task, DONE, MPI_COMM_WORLD,
                      &finalsends[curr_task]);
            if (offsety + blocks_s > NYPROB - 1) {
                offsety = 0;
                offsetx += blocks_s;
            } else
                offsety += blocks_s;
        }
        MPI_Waitall(numtasks, finalsends, MPI_STATUSES_IGNORE);
    }
    MPI_Wait(&finalsend, MPI_STATUS_IGNORE);

    MPI_Type_free(&sendarray);
    MPI_Type_free(&sendtype_col);
    MPI_Type_free(&sendtype_row);
    MPI_Type_free(&receivearray);
    endtime = MPI_Wtime();
    printf("That took %f seconds\n", endtime - starttime);
    MPI_Finalize();
    return 0;   /* FIX: control used to flow off the end of main */
}

/**************************************************************************
 * subroutine updateinner
 * Update the interior cells (those not adjacent to the halo) of the new
 * plane u2 from the old plane u1; safe to run while halos are in flight.
 ****************************************************************************/
void updateinner(int sizep, int size, float *u1, float *u2)
{
    int ix, iy;
#pragma omp parallel
    {
        int chunk = sizep / omp_get_num_threads();
#pragma omp for schedule(static,chunk) private(iy) nowait
        for (ix = 2; ix < sizep; ix++) {
            for (iy = 2; iy < sizep; iy++) {
                *(u2 + ix * size + iy) = *(u1 + ix * size + iy) +
                    parms.cx * (*(u1 + (ix + 1) * size + iy) + *(u1 + (ix - 1) * size + iy) -
                                2.0 * *(u1 + ix * size + iy)) +
                    parms.cy * (*(u1 + ix * size + iy + 1) + *(u1 + ix * size + iy - 1) -
                                2.0 * *(u1 + ix * size + iy));
            }
        }
    }
}

/**************************************************************************
 * subroutine updateouter
 * Update the four border rows/columns of the block once the halo values
 * have arrived; each side is skipped when there is no neighbor (NONE).
 ****************************************************************************/
void updateouter(int up, int down, int left, int right, int sizep, int size,
                 int task, int nblocks, float *u1, float *u2)
{
    int ix, iy;
#pragma omp parallel
    {
        if (up != NONE) {
            int chunk = sizep / omp_get_num_threads();
#pragma omp for schedule(static,chunk) private(iy) nowait
            for (ix = size + 1; ix <= 2 * size - 2; ix++) {
                *(u2 + ix) = *(u1 + ix) +
                    parms.cx * (*(u1 + ix + size) + *(u1 + ix - size) - 2.0 * *(u1 + ix)) +
                    parms.cy * (*(u1 + ix + 1) + *(u1 + ix - 1) - 2.0 * *(u1 + ix));
            }
        }
        if (down != NONE) {
            int chunk = sizep / omp_get_num_threads();
#pragma omp for schedule(static,chunk) private(iy) nowait
            for (ix = size * (size - 2) + 1; ix <= size * size - 2; ix++) {
                *(u2 + ix) = *(u1 + ix) +
                    parms.cx * (*(u1 + ix + size) + *(u1 + ix - size) - 2.0 * *(u1 + ix)) +
                    parms.cy * (*(u1 + ix + 1) + *(u1 + ix - 1) - 2.0 * *(u1 + ix));
            }
        }
        if (left != NONE) {
            int chunk = sizep / omp_get_num_threads();
#pragma omp for schedule(static,chunk) private(iy) nowait
            for (ix = 2 * size + 1; ix <= size * (size - 3) + 1; ix += size) {
                *(u2 + ix) = *(u1 + ix) +
                    parms.cx * (*(u1 + ix + size) + *(u1 + ix - size) - 2.0 * *(u1 + ix)) +
                    parms.cy * (*(u1 + ix + 1) + *(u1 + ix - 1) - 2.0 * *(u1 + ix));
            }
        }
        if (right != NONE) {
            int chunk = sizep / omp_get_num_threads();
#pragma omp for schedule(static,chunk) private(iy) nowait
            for (ix = 3 * size - 2; ix <= size * (size - 2) - 2; ix += size) {
                *(u2 + ix) = *(u1 + ix) +
                    parms.cx * (*(u1 + ix + size) + *(u1 + ix - size) - 2.0 * *(u1 + ix)) +
                    parms.cy * (*(u1 + ix + 1) + *(u1 + ix - 1) - 2.0 * *(u1 + ix));
            }
        }
    }
}

/*****************************************************************************
 * subroutine inidat
 * Fill an nx x size grid with the standard bowl-shaped initial condition.
 *****************************************************************************/
void inidat(int nx, int size, float *u)
{
    int ix, iy;

    for (ix = 0; ix <= nx - 1; ix++)
        for (iy = 0; iy <= size - 1; iy++)
            *(u + ix * size + iy) = (float)(ix * (nx - ix - 1) * iy * (size - iy - 1));
}

/**************************************************************************
 * subroutine prtdat
 * Dump the grid to a text file, one y-row per output line.
 **************************************************************************/
void prtdat(int nx, int size, float *u1, char *fnam)
{
    int ix, iy;
    FILE *fp;

    fp = fopen(fnam, "w");
    for (iy = size - 1; iy >= 0; iy--) {
        for (ix = 0; ix <= nx - 1; ix++) {
            fprintf(fp, "%6.1f", *(u1 + ix * size + iy));
            if (ix != nx - 1)
                fprintf(fp, " ");
            else
                fprintf(fp, "\n");
        }
    }
    fclose(fp);
}
nbody_brute_force.c
/* ** nbody_brute_force.c - nbody simulation using the brute-force algorithm (O(n*n)) ** **/ #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include <math.h> #include <sys/time.h> #include <assert.h> #include <unistd.h> #include <mpi.h> #include <omp.h> #ifdef DISPLAY #include <X11/Xlib.h> #include <X11/Xutil.h> #endif #include "ui.h" #include "nbody.h" #include "nbody_tools.h" FILE* f_out=NULL; int nparticles=10; /* number of particles */ float T_FINAL=1.0; /* simulation end time */ particle_t*particles; double sum_speed_sq = 0; double max_acc = 0; double max_speed = 0; int init_signal = 1; // signaling slaves to initialize first round computing /* MPI_Tag for two types of jobs */ int ACC_TAG = 97; int SPEED_TAG = 96; double dt = 0.01; int step = 0; void init() { /* Nothing to do */ } #ifdef DISPLAY Display *theDisplay; /* These three variables are required to open the */ GC theGC; /* particle plotting window. They are externally */ Window theMain; /* declared in ui.h but are also required here. 
*/ #endif /* compute the force that a particle with position (x_pos, y_pos) and mass 'mass' * applies to particle p */ void compute_force(particle_t*p, double x_pos, double y_pos, double mass) { double x_sep, y_sep, dist_sq, grav_base; x_sep = x_pos - p->x_pos; y_sep = y_pos - p->y_pos; dist_sq = MAX((x_sep*x_sep) + (y_sep*y_sep), 0.01); /* Use the 2-dimensional gravity rule: F = d * (GMm/d^2) */ grav_base = GRAV_CONSTANT*(p->mass)*(mass)/dist_sq; p->x_force += grav_base*x_sep; p->y_force += grav_base*y_sep; } /* compute the new position/velocity */ void move_particle(particle_t*p, double step) { p->x_pos += (p->x_vel)*step; p->y_pos += (p->y_vel)*step; double x_acc = p->x_force/p->mass; double y_acc = p->y_force/p->mass; p->x_vel += x_acc*step; p->y_vel += y_acc*step; /* compute statistics */ double cur_acc = (x_acc*x_acc + y_acc*y_acc); cur_acc = sqrt(cur_acc); double speed_sq = (p->x_vel)*(p->x_vel) + (p->y_vel)*(p->y_vel); double cur_speed = sqrt(speed_sq); sum_speed_sq += speed_sq; max_acc = MAX(max_acc, cur_acc); max_speed = MAX(max_speed, cur_speed); } /* display all the particles */ void draw_all_particles() { int i; for(i=0; i<nparticles; i++) { int x = POS_TO_SCREEN(particles[i].x_pos); int y = POS_TO_SCREEN(particles[i].y_pos); draw_point(x,y); } } void print_all_particles(FILE* f) { int i; for(i=0; i<nparticles; i++) { particle_t*p = &particles[i]; fprintf(f, "particle={pos=(%f,%f), vel=(%f,%f)}\n", p->x_pos, p->y_pos, p->x_vel, p->y_vel); } } /* Simulate the movement of nparticles particles. */ int main(int argc, char**argv) { if(argc >= 2) { nparticles = atoi(argv[1]); } if(argc == 3) { T_FINAL = atof(argv[2]); } init(); int rank, size; /* MPI Initialization */ MPI_Init(&argc, &argv); /* Get the rank of the current task and the number * of MPI processe */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Status status; /* Allocate global shared arrays for the particles data set. 
*/ particles = malloc(sizeof(particle_t)*nparticles); all_init_particles(nparticles, particles); /* Initialize thread data structures */ #ifdef DISPLAY /* Open an X window to display the particles */ simple_init (100,100,DISPLAY_SIZE, DISPLAY_SIZE); #endif double t1, t2, duration; /* Start simulation */ double t = 0.0, dt = 0.01; int i, j; int nums_per_proc = nparticles/(size-1); int root_task = nparticles - nums_per_proc*(size-1); particle_t* par_per_proc; /* Create MPI type for collective communication */ MPI_Datatype particle_mpi_t; int blocklens[1] = {7}; MPI_Aint offsets[1] = {0}; MPI_Datatype types[1] = {MPI_DOUBLE}; MPI_Type_create_struct(1, blocklens, offsets, types, &particle_mpi_t); MPI_Type_commit(&particle_mpi_t); /* Pre-define the displacements, counts for gathering particles[..] from slave procs */ int *displs = NULL; int *counts = NULL; if (rank == 0){ displs = malloc(size * sizeof(int)); counts = malloc(size * sizeof(int)); displs[0] = 0; counts[0] = root_task; for (i = 1; i < size; i++) { displs[i] = root_task + nums_per_proc * (i-1); counts[i] = nums_per_proc; } } while (t < T_FINAL && nparticles > 0) { /* Update time. */ t += dt; /* Move particles with the current and compute rms velocity. */ /* 1. 
Computing task */ if (rank != 0) { // normal tasks for nums_per_proc in nparticles if(step==0) { par_per_proc = malloc(sizeof(particle_t)*nums_per_proc); if (par_per_proc == NULL) { fprintf(stderr, "Fatal: failed to allocate bytes.\n"); abort(); } } #pragma omp parallel for private(i,j) schedule(dynamic) for (i = root_task + nums_per_proc*(rank-1); i < root_task + nums_per_proc * rank; i++){ particles[i].x_force = 0; particles[i].y_force = 0; for(j = 0; j < nparticles; j++) { particle_t*p = &particles[j]; compute_force(&particles[i], p->x_pos, p->y_pos, p->mass); } par_per_proc[i-root_task-nums_per_proc*(rank-1)] = particles[i]; } MPI_Send(&max_acc, 1, MPI_DOUBLE, 0, ACC_TAG, MPI_COMM_WORLD); MPI_Send(&max_speed, 1, MPI_DOUBLE, 0, SPEED_TAG, MPI_COMM_WORLD); } else { // rank==0 nums_per_proc = root_task; /* Alloc particles arrays for current proc */ if(step == 0) { t1 = MPI_Wtime(); printf("t1 = %f\n", t1); par_per_proc = malloc(sizeof(particle_t)*nums_per_proc); if (par_per_proc == NULL) { fprintf(stderr, "Fatal: failed to allocate bytes.\n"); abort(); } } /* Executing computing task of root */ #pragma omp parallel for private(i,j) schedule(dynamic) for (i = 0; i < nums_per_proc; i++){ particles[i].x_force = 0; particles[i].y_force = 0; for(j = 0; j < nparticles; j++) { particle_t*p = &particles[j]; compute_force(&particles[i], p->x_pos, p->y_pos, p->mass); } par_per_proc[i] = particles[i]; } /* Recv the max_acc and max_speed from other procs */ for (i = 1; i < size; i++) { double max_acc_recv, max_speed_recv; MPI_Recv(&max_acc_recv, 1, MPI_DOUBLE, i, ACC_TAG, MPI_COMM_WORLD, &status); MPI_Recv(&max_speed_recv, 1, MPI_DOUBLE, i, SPEED_TAG, MPI_COMM_WORLD, &status); if (max_acc_recv > max_acc) max_acc = max_acc_recv; if (max_speed_recv > max_speed) max_speed = max_speed_recv; } } MPI_Gatherv(par_per_proc, nums_per_proc, particle_mpi_t, particles, counts, displs, particle_mpi_t, 0, MPI_COMM_WORLD); /* 2. 
Move task (only in root) */ if(rank == 0) { #pragma omp parallel for private(i) schedule(dynamic) for(i = 0; i < nparticles; i++) { move_particle(&particles[i], dt); } } // send new positions, forces, acc MPI_Bcast(particles, nparticles, particle_mpi_t, 0, MPI_COMM_WORLD); MPI_Bcast(&max_speed, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&max_acc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); /* Adjust dt based on maximum speed and acceleration--this simple rule tries to insure that no velocity will change by more than 10% */ dt = 0.1*max_speed/max_acc; step++; #if DISPLAY clear_display(); draw_all_particles(); flush_display(); #endif } if (rank == 0) { t2 = MPI_Wtime(); printf("t2 = %f\n", t2); duration = t2 - t1; } t2 = MPI_Wtime(); duration = t2 - t1; #ifdef DUMP_RESULT FILE* f_out = fopen("particles.log", "w"); assert(f_out); print_all_particles(f_out); fclose(f_out); #endif free(par_per_proc); free(particles); if (rank == 0) { printf("-----------------------------\n"); printf("nparticles: %d\n", nparticles); printf("T_FINAL: %f\n", T_FINAL); printf("-----------------------------\n"); printf("Simulation took %lf s to complete\n", duration); } #ifdef DISPLAY clear_display(); draw_all_particles(); flush_display(); printf("Hit return to close the window."); getchar(); /* Close the X window used to display the particles */ XCloseDisplay(theDisplay); #endif MPI_Finalize(); return 0; }
dct2_fft2.h
/** * @file dct2_fft2.h * @author Zixuan Jiang, Jiaqi Gu (DREAMPlace) * @date Aug 2019 * @brief All the transforms in this file are implemented based on 2D FFT. * Each transfrom has three steps, 1) preprocess, 2) 2d fft or 2d ifft, 3) postprocess. */ #ifndef DREAMPLACE_DCT2_FFT2_H #define DREAMPLACE_DCT2_FFT2_H #include <math.h> #include <float.h> #include "utility/src/torch.h" #include "utility/src/Msg.h" #include "utility/src/ComplexNumber.h" DREAMPLACE_BEGIN_NAMESPACE #define CHECK_CPU(x) AT_ASSERTM(!x.is_cuda(), #x "must be a tensor on CPU") #define CHECK_FLAT(x) AT_ASSERTM(!x.is_cuda() && x.ndimension() == 1, #x "must be a flat tensor on GPU") #define CHECK_EVEN(x) AT_ASSERTM((x.numel()&1) == 0, #x "must have even number of elements") #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x "must be contiguous") void dct2_fft2_forward( at::Tensor x, at::Tensor expkM, at::Tensor expkN, at::Tensor out, at::Tensor buf, int num_threads); void idct2_fft2_forward( at::Tensor x, at::Tensor expkM, at::Tensor expkN, at::Tensor out, at::Tensor buf, int num_threads); void idct_idxst_forward( at::Tensor x, at::Tensor expkM, at::Tensor expkN, at::Tensor out, at::Tensor buf, int num_threads); void idxst_idct_forward( at::Tensor x, at::Tensor expkM, at::Tensor expkN, at::Tensor out, at::Tensor buf, int num_threads); inline int INDEX(const int hid, const int wid, const int N) { return (hid * N + wid); } template <typename T> void dct2dPreprocessCpu( const T* x, T* y, const int M, const int N, int num_threads) { int halfN = N / 2; #pragma omp parallel for num_threads(num_threads) for(int hid = 0; hid < M; ++hid) { for(int wid = 0; wid < N; ++wid) { int index; int cond = (((hid & 1) == 0) << 1) | ((wid & 1) == 0); switch (cond) { case 0: index = INDEX(2 * M - (hid + 1), N - (wid + 1) / 2, halfN); break; case 1: index = INDEX(2 * M - (hid + 1), wid / 2, halfN); break; case 2: index = INDEX(hid, N - (wid + 1) / 2, halfN); break; case 3: index = INDEX(hid, wid / 2, halfN); 
break; default: break; } y[index] = x[INDEX(hid, wid, N)]; } } } template <typename T> void dct2dPreprocessCpuLauncher( const T* x, T* y, const int M, const int N, int num_threads) { dct2dPreprocessCpu<T>(x, y, M, N, num_threads); } template <typename T, typename TComplex> void dct2dPostprocessCpu( const TComplex* V, T* y, const int M, const int N, const TComplex* expkM, const TComplex* expkN, int num_threads) { int halfM = M / 2; int halfN = N / 2; T four_over_MN =(T)(4. / (M * N)); T two_over_MN =(T)(2. / (M * N)); #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { y[0] = V[0].x * four_over_MN; y[halfN] = RealPartOfMul(expkN[halfN], V[halfN]) * four_over_MN; y[INDEX(halfM, 0, N)] = expkM[halfM].x * V[INDEX(halfM, 0, halfN + 1)].x * four_over_MN; y[INDEX(halfM, halfN, N)] = expkM[halfM].x * RealPartOfMul(expkN[halfN], V[INDEX(halfM, halfN, halfN + 1)]) * four_over_MN; break; } case 1: { ComplexType<T> tmp; tmp = V[wid]; y[wid] = RealPartOfMul(expkN[wid], tmp) * four_over_MN; y[N - wid] = -ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN; tmp = V[INDEX(halfM, wid, halfN + 1)]; y[INDEX(halfM, wid, N)] = expkM[halfM].x * RealPartOfMul(expkN[wid], tmp) * four_over_MN; y[INDEX(halfM, N - wid, N)] = -expkM[halfM].x * ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN; break; } case 2: { ComplexType<T> tmp1, tmp2, tmp_up, tmp_down; tmp1 = V[INDEX(hid, 0, halfN + 1)]; tmp2 = V[INDEX(M - hid, 0, halfN + 1)]; tmp_up.x = expkM[hid].x * (tmp1.x + tmp2.x) + expkM[hid].y * (tmp2.y - tmp1.y); tmp_down.x = -expkM[hid].y * (tmp1.x + tmp2.x) + expkM[hid].x * (tmp2.y - tmp1.y); y[INDEX(hid, 0, N)] = tmp_up.x * two_over_MN; y[INDEX(M - hid, 0, N)] = tmp_down.x * two_over_MN; tmp1 = complexAdd(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]); tmp2 = complexSubtract(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, 
halfN, halfN + 1)]); tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y; tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x; tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y; tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x; y[INDEX(hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_up) * two_over_MN; y[INDEX(M - hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_down) * two_over_MN; break; } case 3: { ComplexType<T> tmp1, tmp2, tmp_up, tmp_down; tmp1 = complexAdd(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]); tmp2 = complexSubtract(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]); tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y; tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x; tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y; tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x; y[INDEX(hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_up) * two_over_MN; y[INDEX(M - hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_down) * two_over_MN; y[INDEX(hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_up) * two_over_MN; y[INDEX(M - hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_down) * two_over_MN; break; } default: assert(0); break; } } } } template <typename T> void dct2dPostprocessCpuLauncher( const T* x, T* y, const int M, const int N, const T* expkM, const T* expkN, int num_threads) { dct2dPostprocessCpu<T, ComplexType<T>>((ComplexType<T> *)x, y, M, N, (ComplexType<T> *)expkM, (ComplexType<T> *)expkN, num_threads); } template <typename T, typename TComplex> void idct2_fft2PreprocessCpu( const T* input, TComplex* output, const int M, const int N, const TComplex* expkM, const TComplex* expkN, int num_threads) { const int halfM = M / 2; const int halfN = N / 2; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { T 
tmp1; TComplex tmp_up; output[0].x = input[0]; output[0].y = 0; tmp1 = input[halfN]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up)); tmp1 = input[INDEX(halfM, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up)); tmp1 = input[INDEX(halfM, halfN, N)]; tmp_up.x = 0; tmp_up.y = 2 * tmp1; output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up)); break; } case 1: { TComplex tmp_up; tmp_up.x = input[wid]; tmp_up.y = input[N - wid]; output[wid] = complexConj(complexMul(expkN[wid], tmp_up)); T tmp1 = input[INDEX(halfM, wid, N)]; T tmp2 = input[INDEX(halfM, N - wid, N)]; tmp_up.x = tmp1 - tmp2; tmp_up.y = tmp1 + tmp2; output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up)); break; } case 2: { T tmp1, tmp3; TComplex tmp_up, tmp_down; tmp1 = input[INDEX(hid, 0, N)]; tmp3 = input[INDEX(M - hid, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp3; tmp_down.x = tmp3; tmp_down.y = tmp1; output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up)); output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down)); tmp1 = input[INDEX(hid, halfN, N)]; tmp3 = input[INDEX(M - hid, halfN, N)]; tmp_up.x = tmp1 - tmp3; tmp_up.y = tmp3 + tmp1; tmp_down.x = tmp3 - tmp1; tmp_down.y = tmp1 + tmp3; output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up)); output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down)); break; } case 3: { T tmp1 = input[INDEX(hid, wid, N)]; T tmp2 = input[INDEX(hid, N - wid, N)]; T tmp3 = input[INDEX(M - hid, wid, N)]; T tmp4 = input[INDEX(M - hid, N - wid, N)]; TComplex tmp_up, tmp_down; tmp_up.x = tmp1 - tmp4; tmp_up.y = tmp3 + tmp2; tmp_down.x = tmp3 - tmp2; tmp_down.y = tmp1 + tmp4; 
output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up)); output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down)); break; } default: assert(0); break; } } } } template <typename T> void idct2_fft2PreprocessCpuLauncher( const T* x, T* y, const int M, const int N, const T* expkM, const T* expkN, int num_threads) { idct2_fft2PreprocessCpu<T, ComplexType<T>>(x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads); } template <typename T> void idct2_fft2PostprocessCpu( const T *x, T *y, const int M, const int N, int num_threads) { int MN = M * N; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < M; ++hid) { for (int wid = 0; wid < N; ++wid) { int cond = ((hid < M / 2) << 1) | (wid < N / 2); int index; switch (cond) { case 0: index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N); break; case 1: index = INDEX(((M - hid) << 1) - 1, wid << 1, N); break; case 2: index = INDEX(hid << 1, ((N - wid) << 1) - 1, N); break; case 3: index = INDEX(hid << 1, wid << 1, N); break; default: assert(0); break; } y[index] = x[INDEX(hid, wid, N)] * MN; } } } template <typename T> void idct2_fft2PostprocessCpuLauncher( const T *x, T *y, const int M, const int N, int num_threads) { idct2_fft2PostprocessCpu<T>(x, y, M, N, num_threads); } template <typename T, typename TComplex> void idct_idxstPreprocessCpu( const T* input, TComplex* output, const int M, const int N, const TComplex* expkM, const TComplex* expkN, int num_threads) { int halfM = M / 2; int halfN = N / 2; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { T tmp1; TComplex tmp_up; output[0].x = 0; output[0].y = 0; tmp1 = input[halfN]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[halfN] = 
complexConj(complexMul(expkN[halfN], tmp_up)); output[INDEX(halfM, 0, halfN + 1)].x = 0; output[INDEX(halfM, 0, halfN + 1)].y = 0; tmp1 = input[INDEX(halfM, halfN, N)]; tmp_up.x = 0; tmp_up.y = 2 * tmp1; output[INDEX(halfM, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up)); break; } case 1: { TComplex tmp_up; tmp_up.x = input[N - wid]; tmp_up.y = input[wid]; output[wid] = complexConj(complexMul(expkN[wid], tmp_up)); T tmp1 = input[INDEX(halfM, N - wid, N)]; T tmp2 = input[INDEX(halfM, wid, N)]; tmp_up.x = tmp1 - tmp2; tmp_up.y = tmp1 + tmp2; output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up)); break; } case 2: { T tmp1, tmp3; TComplex tmp_up, tmp_down; output[INDEX(hid, 0, halfN + 1)].x = 0; output[INDEX(hid, 0, halfN + 1)].y = 0; output[INDEX(M - hid, 0, halfN + 1)].x = 0; output[INDEX(M - hid, 0, halfN + 1)].y = 0; tmp1 = input[INDEX(hid, halfN, N)]; tmp3 = input[INDEX(M - hid, halfN, N)]; tmp_up.x = tmp1 - tmp3; tmp_up.y = tmp3 + tmp1; tmp_down.x = tmp3 - tmp1; tmp_down.y = tmp1 + tmp3; output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up)); output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down)); break; } case 3: { T tmp1 = input[INDEX(hid, N - wid, N)]; T tmp2 = input[INDEX(hid, wid, N)]; T tmp3 = input[INDEX(M - hid, N - wid, N)]; T tmp4 = input[INDEX(M - hid, wid, N)]; TComplex tmp_up, tmp_down; tmp_up.x = tmp1 - tmp4; tmp_up.y = tmp3 + tmp2; tmp_down.x = tmp3 - tmp2; tmp_down.y = tmp1 + tmp4; output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up)); output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down)); break; } default: assert(0); break; } } } } template <typename T> void idct_idxstPreprocessCpuLauncher( const T* x, T* y, const int M, 
const int N, const T* expkM, const T* expkN, int num_threads) { idct_idxstPreprocessCpu<T, ComplexType<T>>(x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads); } template <typename T> void idct_idxstPostprocessCpu( const T* x, T* y, const int M, const int N, int num_threads) { //const int halfN = N / 2; const int MN = M * N; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < M; ++hid) { for (int wid = 0; wid < N; ++wid) { int cond = ((hid < M / 2) << 1) | (wid < N / 2); int index; switch (cond) { case 0: index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 1: index = INDEX(((M - hid) << 1) - 1, wid << 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; case 2: index = INDEX(hid << 1, ((N - wid) << 1) - 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 3: index = INDEX(hid << 1, wid << 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; default: assert(0); break; } } } } template <typename T> void idct_idxstPostprocessCpuLauncher( const T* x, T* y, const int M, const int N, int num_threads) { idct_idxstPostprocessCpu<T>(x, y, M, N, num_threads); } template <typename T, typename TComplex> void idxst_idctPreprocessCpu( const T* input, TComplex* output, const int M, const int N, const TComplex* expkM, const TComplex* expkN, int num_threads) { const int halfM = M / 2; const int halfN = N / 2; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { T tmp1; TComplex tmp_up; output[0].x = 0; output[0].y = 0; output[halfN].x = 0; output[halfN].y = 0; tmp1 = input[INDEX(halfM, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up)); tmp1 = input[INDEX(halfM, halfN, N)]; tmp_up.x = 0; tmp_up.y = 2 * tmp1; output[INDEX(halfM, halfN, halfN 
+ 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up)); break; } case 1: { output[wid].x = 0; output[wid].y = 0; TComplex tmp_up; T tmp1 = input[INDEX(halfM, wid, N)]; T tmp2 = input[INDEX(halfM, N - wid, N)]; tmp_up.x = tmp1 - tmp2; tmp_up.y = tmp1 + tmp2; output[INDEX(halfM, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up)); break; } case 2: { T tmp1, tmp3; TComplex tmp_up, tmp_down; tmp1 = input[INDEX(M - hid, 0, N)]; tmp3 = input[INDEX(hid, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp3; tmp_down.x = tmp3; tmp_down.y = tmp1; output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up)); output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down)); tmp1 = input[INDEX(M - hid, halfN, N)]; tmp3 = input[INDEX(hid, halfN, N)]; tmp_up.x = tmp1 - tmp3; tmp_up.y = tmp3 + tmp1; tmp_down.x = tmp3 - tmp1; tmp_down.y = tmp1 + tmp3; output[INDEX(hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up)); output[INDEX(M - hid, halfN, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down)); break; } case 3: { T tmp1 = input[INDEX(M - hid, wid, N)]; T tmp2 = input[INDEX(M - hid, N - wid, N)]; T tmp3 = input[INDEX(hid, wid, N)]; T tmp4 = input[INDEX(hid, N - wid, N)]; TComplex tmp_up, tmp_down; tmp_up.x = tmp1 - tmp4; tmp_up.y = tmp3 + tmp2; tmp_down.x = tmp3 - tmp2; tmp_down.y = tmp1 + tmp4; output[INDEX(hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up)); output[INDEX(M - hid, wid, halfN + 1)] = complexConj(complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down)); break; } default: assert(0); break; } } } } template <typename T> void idxst_idctPreprocessCpuLauncher( const T* x, T* y, const int M, const int N, const T* expkM, const T* expkN, int num_threads) { idxst_idctPreprocessCpu<T, ComplexType<T>>(x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, 
(ComplexType<T>*)expkN, num_threads); } template <typename T> void idxst_idctPostprocessCpu( const T* x, T* y, const int M, const int N, int num_threads) { //const int halfN = N / 2; const int MN = M * N; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < M; ++hid) { for (int wid = 0; wid < N; ++wid) { int cond = ((hid < M / 2) << 1) | (wid < N / 2); int index; switch (cond) { case 0: index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 1: index = INDEX(((M - hid) << 1) - 1, wid << 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 2: index = INDEX(hid << 1, ((N - wid) << 1) - 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; case 3: index = INDEX(hid << 1, wid << 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; default: assert(0); break; } } } } template <typename T> void idxst_idctPostprocessCpuLauncher( const T* x, T* y, const int M, const int N, int num_threads) { idxst_idctPostprocessCpu<T>(x, y, M, N, num_threads); } DREAMPLACE_END_NAMESPACE #endif
is.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - IS This benchmark is an OpenMP C version of the NPB IS code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: M. Yarrow OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" #include "npbparams.h" #include <stdlib.h> #include <stdio.h> #include <stdint.h> #if defined(_OPENMP) #include <omp.h> #endif /* _OPENMP */ #define STACK_SIZE (8 * 1024 * 1024) /*****************************************************************/ /* For serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (Note: Mechanism not understood, probably cache related) */ /* Example: SP2-66MhzWN: 50% speedup with buckets */ /* Example: SGI Indy5000: 50% slowdown with buckets */ /* Example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /*****************************************************************/ /* #define USE_BUCKETS */ /* buckets are not used in the OpenMP C version */ /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 16 #define MAX_KEY_LOG_2 11 #define NUM_BUCKETS_LOG_2 9 #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 20 #define MAX_KEY_LOG_2 16 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 23 #define MAX_KEY_LOG_2 19 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 25 #define MAX_KEY_LOG_2 21 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 27 #define MAX_KEY_LOG_2 23 #define NUM_BUCKETS_LOG_2 10 #endif #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS TOTAL_KEYS #define SIZE_OF_BUFFERS NUM_KEYS #define MAX_ITERATIONS 10 #define TEST_ARRAY_SIZE 5 /*************************************/ /* Typedef: if necessary, change the */ /* size of int here by changing the */ /* int type to, say, long */ /*************************************/ typedef int INT_TYPE; /********************/ /* Some global info */ /********************/ INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */ /* copies of rank info */ int passed_verification; /************************************/ /* These are the three main arrays. 
*/ /* See SIZE_OF_BUFFERS def above */ /************************************/ INT_TYPE key_array[SIZE_OF_BUFFERS], key_buff1[SIZE_OF_BUFFERS], key_buff2[SIZE_OF_BUFFERS], partial_verify_vals[TEST_ARRAY_SIZE]; #ifdef USE_BUCKETS INT_TYPE bucket_size[NUM_BUCKETS], bucket_ptrs[NUM_BUCKETS]; #endif /**********************/ /* Partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}; /***********************/ /* function prototypes */ /***********************/ double is_randlc( double *X, double *A ); void full_verify( void ); /* * FUNCTION RANDLC (X, A) * * This routine returns a uniform pseudorandom double precision number in the * range (0, 1) by using the linear congruential generator * * x_{k+1} = a x_k (mod 2^46) * * where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers * before repeating. The argument A is the same as 'a' in the above formula, * and X is the same as x_0. A and X must be odd double precision integers * in the range (1, 2^46). The returned value RANDLC is normalized to be * between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. 
X is updated to contain * the new seed x_1, so that subsequent calls to RANDLC using the same * arguments will generate a continuous sequence. * * This routine should produce the same results on any computer with at least * 48 mantissa bits in double precision floating point data. On Cray systems, * double precision should be disabled. * * David H. Bailey October 26, 1990 * * IMPLICIT DOUBLE PRECISION (A-H, O-Z) * SAVE KS, R23, R46, T23, T46 * DATA KS/0/ * * If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46, * T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than * by merely using the ** operator, in order to insure that the results are * exact on all systems. This code assumes that 0.5D0 is represented exactly. */ /*****************************************************************/ /************* R A N D L C ************/ /************* ************/ /************* portable random number generator ************/ /*****************************************************************/ double is_randlc(X, A) double *X; double *A; { static int KS=0; static double R23, R46, T23, T46; double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; int i, j; if (KS == 0) { R23 = 1.0; R46 = 1.0; T23 = 1.0; T46 = 1.0; for (i=1; i<=23; i++) { R23 = 0.50 * R23; T23 = 2.0 * T23; } for (i=1; i<=46; i++) { R46 = 0.50 * R46; T46 = 2.0 * T46; } KS = 1; } /* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */ T1 = R23 * *A; j = T1; A1 = j; A2 = *A - T23 * A1; /* Break X into two parts such that X = 2^23 * X1 + X2, compute Z = A1 * X2 + A2 * X1 (mod 2^23), and then X = 2^23 * Z + A2 * X2 (mod 2^46). 
*/
    /* ---- tail of is_randlc(): split X into 23-bit halves X1/X2, form
     * Z = A1*X2 + A2*X1 (mod 2^23), then the new seed
     * X = 2^23*Z + A2*X2 (mod 2^46); return X scaled to (0,1) by 2^-46. */
    T1 = R23 * *X;
    j = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    T1 = A1 * X2 + A2 * X1;
    j = R23 * T1;
    T2 = j;
    Z = T1 - T23 * T2;
    T3 = T23 * Z + A2 * X2;
    j = R46 * T3;
    T4 = j;
    *X = T3 - T46 * T4;
    return(R46 * *X);
}

/*****************************************************************/
/*************      C  R  E  A  T  E  _  S  E  Q      ************/
/*****************************************************************/

/* Fill the global key_array[] with NUM_KEYS pseudorandom keys in
 * [0, MAX_KEY).  Each key is the sum of four is_randlc() draws scaled
 * by MAX_KEY/4, giving an approximately bell-shaped key distribution. */
void create_seq( double seed, double a )
{
    double x;
    int    i, j, k;        /* j is declared but unused; kept as in the original */

    k = MAX_KEY/4;

    for (i=0; i<NUM_KEYS; i++)
    {
        /* Sum four uniform draws; seed advances on every call. */
        x = is_randlc(&seed, &a);
        x += is_randlc(&seed, &a);
        x += is_randlc(&seed, &a);
        x += is_randlc(&seed, &a);

        key_array[i] = k*x;
    }
}

/*****************************************************************/
/*************    F  U  L  L  _  V  E  R  I  F  Y     ************/
/*****************************************************************/

/* Place every key into sorted position using the rank prefix sums left
 * in key_buff_ptr_global by rank(), then verify the result is
 * non-decreasing.  Increments passed_verification on success. */
void full_verify()
{
    INT_TYPE    i, j;
    INT_TYPE    k;
    INT_TYPE    m, unique_keys;   /* k, m, unique_keys are unused here; kept as in the original */

    /* Now, finally, sort the keys: counting-sort placement — the prefix
     * sum for each key value is decremented to yield its final index. */
    for( i=0; i<NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];

    /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
    for( i=1; i<NUM_KEYS; i++ )
        if( key_array[i-1] > key_array[i] )
            j++;

    if( j != 0 )
    {
        printf( "Full_verify: number of keys out of sort: %d\n", j );
    }
    else
        passed_verification++;
}

/*****************************************************************/
/*************             R  A  N  K             ****************/
/*****************************************************************/

/* One ranking iteration: counts key populations into a private buffer,
 * prefix-sums them, and merges into the shared key_buff1[].  Intended to
 * be called from inside an OpenMP parallel region (uses master/barrier). */
void rank( int iteration )
{
    INT_TYPE    i, j, k;
    INT_TYPE    l, m;             /* l, m unused; kept as in the original */
    INT_TYPE    shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
    INT_TYPE    key;
    INT_TYPE    min_key_val, max_key_val;
    /* NOTE(review): MAX_KEY-sized per-thread array on the stack — large
     * for bigger classes; confirm thread stack size is sufficient. */
    INT_TYPE    prv_buff1[MAX_KEY];

#pragma omp master
    {
        /* Perturb two keys so each iteration ranks slightly different data. */
        key_array[iteration] = iteration;
        key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;

        /* Determine where the partial verify test keys are, load into  */
        /* top of array bucket_size                                     */
        for( i=0; i<TEST_ARRAY_SIZE; i++ )
            partial_verify_vals[i] = key_array[test_index_array[i]];

        /* Clear the work array */
        for( i=0; i<MAX_KEY;
i++ ) key_buff1[i] = 0; } #pragma omp barrier for (i=0; i<MAX_KEY; i++) prv_buff1[i] = 0; /* Copy keys into work array; keys in key_array will be reused each iter. */ #pragma omp for nowait for( i=0; i<NUM_KEYS; i++ ) { key_buff2[i] = key_array[i]; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ prv_buff1[key_buff2[i]]++; /* Now they have individual key */ } /* population */ for( i=0; i<MAX_KEY-1; i++ ) prv_buff1[i+1] += prv_buff1[i]; #pragma omp critical { for( i=0; i<MAX_KEY; i++ ) key_buff1[i] += prv_buff1[i]; } /* To obtain ranks of each key, successively add the individual key population, not forgetting to add m, the total of lesser keys, to the first key population */ #pragma omp barrier #pragma omp master { /* This is the partial verify test section */ /* Observe that test_rank_array vals are */ /* shifted differently for different cases */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) { k = partial_verify_vals[i]; /* test vals were put here */ if( 0 <= k && k <= NUM_KEYS-1 ) switch( CLASS ) { case 'S': if( i <= 2 ) { if( key_buff1[k-1] != test_rank_array[i]+iteration ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } else { if( key_buff1[k-1] != test_rank_array[i]-iteration ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } break; case 'W': if( i < 2 ) { if( key_buff1[k-1] != test_rank_array[i]+(iteration-2) ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } else { if( key_buff1[k-1] != test_rank_array[i]-iteration ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } break; case 'A': if( i <= 2 ) { if( key_buff1[k-1] != 
test_rank_array[i]+(iteration-1) ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } else { if( key_buff1[k-1] != test_rank_array[i]-(iteration-1) ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } break; case 'B': if( i == 1 || i == 2 || i == 4 ) { if( key_buff1[k-1] != test_rank_array[i]+iteration ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } else { if( key_buff1[k-1] != test_rank_array[i]-iteration ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } break; case 'C': if( i <= 2 ) { if( key_buff1[k-1] != test_rank_array[i]+iteration ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } else { if( key_buff1[k-1] != test_rank_array[i]-iteration ) { printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, i ); } else passed_verification++; } break; } } /* Make copies of rank info for use by full_verify: these variables in rank are local; making them global slows down the code, probably since they cannot be made register by compiler */ if( iteration == MAX_ITERATIONS ) key_buff_ptr_global = key_buff1; } /* end master */ } /*****************************************************************/ /************* M A I N ****************/ /*****************************************************************/ static int realmain(void *cargv) { unsigned argv = (unsigned)((long)cargv); int i, iteration, itemp; int nthreads = 1; double timecounter, maxtime; omp_set_num_threads(argv); /* Initialize the verification arrays if a valid class */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) switch( CLASS ) { case 'S': test_index_array[i] = S_test_index_array[i]; test_rank_array[i] = S_test_rank_array[i]; break; case 'A': 
test_index_array[i] = A_test_index_array[i]; test_rank_array[i] = A_test_rank_array[i]; break; case 'W': test_index_array[i] = W_test_index_array[i]; test_rank_array[i] = W_test_rank_array[i]; break; case 'B': test_index_array[i] = B_test_index_array[i]; test_rank_array[i] = B_test_rank_array[i]; break; case 'C': test_index_array[i] = C_test_index_array[i]; test_rank_array[i] = C_test_rank_array[i]; break; }; /* Printout initial NPB info */ printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - IS Benchmark\n\n" ); printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS ); printf( " Iterations: %d\n", MAX_ITERATIONS ); /* Initialize timer */ timer_clear( 0 ); /* Generate random number sequence and subsequent keys on all procs */ create_seq( 314159265.00, /* Random number gen seed */ 1220703125.00 ); /* Random number gen mult */ /* Do one interation for free (i.e., untimed) to guarantee initialization of all data and code pages and respective tables */ #pragma omp parallel rank( 1 ); /* Start verification counter */ passed_verification = 0; if( CLASS != 'S' ) printf( "\n iteration\n" ); /* Start timer */ timer_start( 0 ); /* This is the main iteration */ #pragma omp parallel private(iteration) for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ ) { //#pragma omp master //if( CLASS != 'S' ) printf( " %d\n", iteration ); rank( iteration ); #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* End of timing, obtain maximum time of all processors */ timer_stop( 0 ); timecounter = timer_read( 0 ); /* This tests that keys are in sequence: sorting of last ranked key seq occurs here, but is an untimed operation */ full_verify(); /* The final printout */ if( passed_verification != 5*MAX_ITERATIONS + 1 ) { passed_verification = 0; } #ifdef BOMP backend_create_time(argv); #endif printf("Computetime %d %f\n", argv, timecounter); printf("client done\n"); /* c_print_results( "IS", */ /* CLASS, */ /* TOTAL_KEYS, */ /* 0, */ 
/* 0, */ /* MAX_ITERATIONS, */ /* nthreads, */ /* timecounter, */ /* ((double) (MAX_ITERATIONS*TOTAL_KEYS)) */ /* /timecounter/1000000., */ /* "keys ranked", */ /* passed_verification, */ /* NPBVERSION, */ /* COMPILETIME, */ /* CC, */ /* CLINK, */ /* C_LIB, */ /* C_INC, */ /* CFLAGS, */ /* CLINKFLAGS, */ /* "randlc"); */ /**************************/ } /* E N D P R O G R A M */ /**************************/ #define STACK_SIZE (8 * 1024 * 1024) int main(int argc, char** argv) { if (argc != 2) { /* Print usage */ printf("Usage: %s <Number of threads>\n", argv[0]); exit(-1); } #ifdef BOMP backend_span_domain(atoi(argv[1]), STACK_SIZE); bomp_custom_init(NULL); backend_thread_create_varstack(realmain, (void*)((uint64_t)atoi(argv[1])), STACK_SIZE); backend_thread_exit(); #else /* BOMP */ realmain((void*)((long)atoi(argv[1]))); #endif /* BOMP */ }
9280.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ #pragma omp for (i = 0; i < _PB_NI; i++) { #pragma omp for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp for (i = 0; i < _PB_NJ; i++) { #pragma omp for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp for (i = 0; i < _PB_NI; i++) { #pragma omp for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
strassen.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /**********************************************************************************************/ /* * Copyright (c) 1996 Massachusetts Institute of Technology * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to use, copy, modify, and distribute the Software without * restriction, provided the Software, including any modified copies made * under this license, is not distributed for a fee, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE MASSACHUSETTS INSTITUTE OF TECHNOLOGY BE LIABLE * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * /WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Except as contained in this notice, the name of the Massachusetts * Institute of Technology shall not be used in advertising or otherwise * to promote the sale, use or other dealings in this Software without * prior written authorization from the Massachusetts Institute of * Technology. 
 * */

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "app-desc.h"
#include "bots.h"
#include "strassen.h"

/***********************************************************************
 * Naive sequential algorithm, for comparison purposes
 **********************************************************************/

/* C = A * B for n x n matrices stored with leading dimensions an, bn, cn.
 * NOTE(review): ELEM(M, ld, i, j) is presumably the (i,j) accessor defined
 * in strassen.h — confirm its layout convention there. */
void matrixmul(int n, REAL *A, int an, REAL *B, int bn, REAL *C, int cn)
{
    int i, j, k;
    REAL s;

    for (i = 0; i < n; ++i)
    {
        for (j = 0; j < n; ++j)
        {
            s = 0.0;
            for (k = 0; k < n; ++k)
                s += ELEM(A, an, i, k) * ELEM(B, bn, k, j);
            ELEM(C, cn, i, j) = s;
        }
    }
}

/*****************************************************************************
**
** FastNaiveMatrixMultiply
**
** For small to medium sized matrices A, B, and C of size
** MatrixSize * MatrixSize this function performs the operation
** C = A x B efficiently.
**
** Note MatrixSize must be divisible by 8.
**
** INPUT:
**    C = (*C WRITE) Address of top left element of matrix C.
**    A = (*A IS READ ONLY) Address of top left element of matrix A.
**    B = (*B IS READ ONLY) Address of top left element of matrix B.
**    MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
**    RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
**    RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
**    RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
**    C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.)
**
*****************************************************************************/
void FastNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB)
{
  /* Assumes size of real is 8 bytes — the `<< 3` shifts below convert
   * element counts to byte offsets for raw-pointer (PTR) arithmetic. */
  PTR RowWidthBInBytes = RowWidthB << 3;
  PTR RowWidthAInBytes = RowWidthA << 3;
  PTR MatrixWidthInBytes = MatrixSize << 3;
  PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3;
  unsigned Horizontal, Vertical;

  REAL *ARowStart = A;
  for (Vertical = 0; Vertical < MatrixSize; Vertical++) {
    /* Compute one row of C, eight output columns at a time. */
    for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) {
      REAL *BColumnStart = B + Horizontal;
      REAL FirstARowValue = *ARowStart++;

      /* Initialize the eight accumulators with the first product
       * (k == 0), so the inner loop can start at Products = 1. */
      REAL Sum0 = FirstARowValue * (*BColumnStart);
      REAL Sum1 = FirstARowValue * (*(BColumnStart+1));
      REAL Sum2 = FirstARowValue * (*(BColumnStart+2));
      REAL Sum3 = FirstARowValue * (*(BColumnStart+3));
      REAL Sum4 = FirstARowValue * (*(BColumnStart+4));
      REAL Sum5 = FirstARowValue * (*(BColumnStart+5));
      REAL Sum6 = FirstARowValue * (*(BColumnStart+6));
      REAL Sum7 = FirstARowValue * (*(BColumnStart+7));

      unsigned Products;
      for (Products = 1; Products < MatrixSize; Products++) {
        REAL ARowValue = *ARowStart++;
        /* Advance one row down column block of B. */
        BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes);

        Sum0 += ARowValue * (*BColumnStart);
        Sum1 += ARowValue * (*(BColumnStart+1));
        Sum2 += ARowValue * (*(BColumnStart+2));
        Sum3 += ARowValue * (*(BColumnStart+3));
        Sum4 += ARowValue * (*(BColumnStart+4));
        Sum5 += ARowValue * (*(BColumnStart+5));
        Sum6 += ARowValue * (*(BColumnStart+6));
        Sum7 += ARowValue * (*(BColumnStart+7));
      }
      /* Rewind to the start of the same A row for the next column block. */
      ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes);

      *(C) = Sum0;
      *(C+1) = Sum1;
      *(C+2) = Sum2;
      *(C+3) = Sum3;
      *(C+4) = Sum4;
      *(C+5) = Sum5;
      *(C+6) = Sum6;
      *(C+7) = Sum7;
      C+=8;
    }
    /* Move A to the next row; skip C past any row padding. */
    ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes );
    C = (REAL*) ( ((PTR) C) + RowIncrementC );
  }
}

/*****************************************************************************
**
** FastAdditiveNaiveMatrixMultiply
**
** For
small to medium sized matrices A, B, and C of size
** MatrixSize * MatrixSize this function performs the operation
** C += A x B efficiently.
**
** Note MatrixSize must be divisible by 8.
**
** INPUT:
**    C = (*C READ/WRITE) Address of top left element of matrix C.
**    A = (*A IS READ ONLY) Address of top left element of matrix A.
**    B = (*B IS READ ONLY) Address of top left element of matrix B.
**    MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
**    RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
**    RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
**    RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
**    C = (*C READ/WRITE) Matrix C contains C + A x B.
**
*****************************************************************************/
/* Accumulating variant of FastNaiveMatrixMultiply: the 8 accumulators are
 * seeded from the existing contents of C, and the inner loop runs over all
 * MatrixSize products (starting at 0, advancing BColumnStart at the end of
 * each iteration rather than the start).  Same 8-byte-REAL/PTR byte
 * arithmetic as the non-additive kernel. */
void FastAdditiveNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB)
{
  /* Assumes size of real is 8 bytes */
  PTR RowWidthBInBytes = RowWidthB << 3;
  PTR RowWidthAInBytes = RowWidthA << 3;
  PTR MatrixWidthInBytes = MatrixSize << 3;
  PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3;
  unsigned Horizontal, Vertical;

  REAL *ARowStart = A;
  for (Vertical = 0; Vertical < MatrixSize; Vertical++) {
    for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) {
      REAL *BColumnStart = B + Horizontal;

      /* Seed accumulators from C so the result is C += A x B. */
      REAL Sum0 = *C;
      REAL Sum1 = *(C+1);
      REAL Sum2 = *(C+2);
      REAL Sum3 = *(C+3);
      REAL Sum4 = *(C+4);
      REAL Sum5 = *(C+5);
      REAL Sum6 = *(C+6);
      REAL Sum7 = *(C+7);

      unsigned Products;
      for (Products = 0; Products < MatrixSize; Products++) {
        REAL ARowValue = *ARowStart++;

        Sum0 += ARowValue * (*BColumnStart);
        Sum1 += ARowValue * (*(BColumnStart+1));
        Sum2 += ARowValue * (*(BColumnStart+2));
        Sum3 += ARowValue * (*(BColumnStart+3));
        Sum4 += ARowValue * (*(BColumnStart+4));
        Sum5 += ARowValue * (*(BColumnStart+5));
        Sum6 += ARowValue * (*(BColumnStart+6));
        Sum7 += ARowValue * (*(BColumnStart+7));

        /* Advance one full row of B after accumulating (contrast with the
         * non-additive kernel, which advances before). */
        BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes);
      }

      /* Rewind to the start of the current row of A for the next 8 columns. */
      ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes);

      *(C) = Sum0;
      *(C+1) = Sum1;
      *(C+2) = Sum2;
      *(C+3) = Sum3;
      *(C+4) = Sum4;
      *(C+5) = Sum5;
      *(C+6) = Sum6;
      *(C+7) = Sum7;

      C+=8;
    }

    ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes );
    C = (REAL*) ( ((PTR) C) + RowIncrementC );
  }
}
/*****************************************************************************
**
** MultiplyByDivideAndConquer
**
** For medium to medium-large (would you like fries with that) sized
** matrices A, B, and C of size MatrixSize * MatrixSize this function
** efficiently performs the operation
**    C  = A x B (if AdditiveMode == 0)
**    C += A x B (if AdditiveMode != 0)
**
** Note MatrixSize must be divisible by 16.
**
** INPUT:
**    C = (*C READ/WRITE) Address of top left element of matrix C.
**    A = (*A IS READ ONLY) Address of top left element of matrix A.
**    B = (*B IS READ ONLY) Address of top left element of matrix B.
**    MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
**    RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
**    RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
**    RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**    AdditiveMode = 0 if we want C = A x B, otherwise we'll do C += A x B
**
** OUTPUT:
**    C (+)= A x B. 
(+ if AdditiveMode != 0)
**
*****************************************************************************/
/* Recursive quadrant decomposition.  Each half-size quadrant product is
 * either recursed on (above the crossover) or handed to one of the naive
 * kernels.  The first four products of the recursive case use the caller's
 * AdditiveMode (they initialize or accumulate each C quadrant); the last
 * four always accumulate.  SizeAtWhichNaiveAlgorithmIsMoreEfficient is a
 * tuning constant defined elsewhere in the benchmark -- TODO confirm. */
void MultiplyByDivideAndConquer(REAL *C, REAL *A, REAL *B,
				     unsigned MatrixSize,
				     unsigned RowWidthC,
				     unsigned RowWidthA,
				     unsigned RowWidthB,
				     int AdditiveMode
				    )
{
  /* Quadrant aliases: the top-left quadrant of each matrix is just the
   * matrix's own base pointer. */
  #define A00 A
  #define B00 B
  #define C00 C
  REAL  *A01, *A10, *A11, *B01, *B10, *B11, *C01, *C10, *C11;
  unsigned QuadrantSize = MatrixSize >> 1;

  /* partition the matrix */
  A01 = A00 + QuadrantSize;
  A10 = A00 + RowWidthA * QuadrantSize;
  A11 = A10 + QuadrantSize;

  B01 = B00 + QuadrantSize;
  B10 = B00 + RowWidthB * QuadrantSize;
  B11 = B10 + QuadrantSize;

  C01 = C00 + QuadrantSize;
  C10 = C00 + RowWidthC * QuadrantSize;
  C11 = C10 + QuadrantSize;

  if (QuadrantSize > SizeAtWhichNaiveAlgorithmIsMoreEfficient) {
    /* First four products: honor the caller's AdditiveMode. */
    MultiplyByDivideAndConquer(C00, A00, B00, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     AdditiveMode);

    MultiplyByDivideAndConquer(C01, A00, B01, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     AdditiveMode);

    MultiplyByDivideAndConquer(C11, A10, B01, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     AdditiveMode);

    MultiplyByDivideAndConquer(C10, A10, B00, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     AdditiveMode);

    /* Second four products: always accumulate (AdditiveMode = 1). */
    MultiplyByDivideAndConquer(C00, A01, B10, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     1);

    MultiplyByDivideAndConquer(C01, A01, B11, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     1);

    MultiplyByDivideAndConquer(C11, A11, B11, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     1);

    MultiplyByDivideAndConquer(C10, A11, B10, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB,
				     1);
  } else {
    /* Base case: first four products via the naive kernels, respecting
     * AdditiveMode for the initial write into each C quadrant. */
    if (AdditiveMode) {
      FastAdditiveNaiveMatrixMultiply(C00, A00, B00, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

      FastAdditiveNaiveMatrixMultiply(C01, A00, B01, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

      FastAdditiveNaiveMatrixMultiply(C11, A10, B01, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

      FastAdditiveNaiveMatrixMultiply(C10, A10, B00, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);
    } else {
      FastNaiveMatrixMultiply(C00, A00, B00, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

      FastNaiveMatrixMultiply(C01, A00, B01, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

      FastNaiveMatrixMultiply(C11, A10, B01, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

      FastNaiveMatrixMultiply(C10, A10, B00, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);
    }

    /* Second four products always accumulate on top of the first four. */
    FastAdditiveNaiveMatrixMultiply(C00, A01, B10, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

    FastAdditiveNaiveMatrixMultiply(C01, A01, B11, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

    FastAdditiveNaiveMatrixMultiply(C11, A11, B11, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);

    FastAdditiveNaiveMatrixMultiply(C10, A11, B10, QuadrantSize,
				     RowWidthC, RowWidthA, RowWidthB);
  }
  return;
}
/*****************************************************************************
**
** OptimizedStrassenMultiply
**
** For large matrices A, B, and C of size MatrixSize * MatrixSize this
** function performs the operation C = A x B efficiently.
**
** INPUT:
**    C = (*C WRITE) Address of top left element of matrix C.
**    A = (*A IS READ ONLY) Address of top left element of matrix A.
**    B = (*B IS READ ONLY) Address of top left element of matrix B.
**    MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
**    RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
**    RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
**    RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
**    C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.) 
** *****************************************************************************/
/* Sequential Strassen recursion (Winograd-style: 8 S temporaries, 7 recursive
 * products).  Below bots_app_cutoff_value (a global defined elsewhere --
 * TODO confirm) it falls back to MultiplyByDivideAndConquer.  Depth counts
 * recursion levels and is only propagated here; the parallel variants use
 * it for task cutoff.
 * NOTE(review): the malloc() result is used without a NULL check --
 * confirm the benchmark harness's allocation-failure policy. */
void OptimizedStrassenMultiply_seq(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
  unsigned QuadrantSize = MatrixSize >> 1;             /* MatixSize / 2 */
  /* +32 bytes of slack per temporary so each can be aligned to a 32-byte
   * cache boundary below. */
  unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize + 32;
  unsigned Column, Row;

  /************************************************************************
  ** For each matrix A, B, and C, we'll want pointers to each quandrant
  ** in the matrix. These quandrants will be addressed as follows:
  **  --        --
  **  | A11  A12 |
  **  |          |
  **  | A21  A22 |
  **  --        --
  ************************************************************************/
  REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
       *A21, *B21, *C21, *A22, *B22, *C22;

  REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
  #define T2sMULT C22
  #define NumberOfVariables 11

  PTR TempMatrixOffset = 0;
  PTR MatrixOffsetA = 0;
  PTR MatrixOffsetB = 0;

  char *Heap;
  void *StartHeap;

  /* Distance between the end of a matrix row and the start of the next row */
  PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
  PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
  PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;

  /* Recursion base case: hand small problems to the divide-and-conquer
   * multiplier in non-additive mode. */
  if (MatrixSize <= bots_app_cutoff_value) {
    MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
    return;
  }

  /* Initialize quandrant matrices */
  #define A11 A
  #define B11 B
  #define C11 C
  A12 = A11 + QuadrantSize;
  B12 = B11 + QuadrantSize;
  C12 = C11 + QuadrantSize;
  A21 = A + (RowWidthA * QuadrantSize);
  B21 = B + (RowWidthB * QuadrantSize);
  C21 = C + (RowWidthC * QuadrantSize);
  A22 = A21 + QuadrantSize;
  B22 = B21 + QuadrantSize;
  C22 = C21 + QuadrantSize;

  /* Allocate Heap Space Here */
  StartHeap = Heap = malloc(QuadrantSizeInBytes * NumberOfVariables);
  /* ensure that heap is on cache boundary */
  if ( ((PTR) Heap) & 31)
     Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) );

  /* Distribute the heap space over the variables */
  S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {

    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column++) {

      /***********************************************************
      ** Within this loop, the following holds for MatrixOffset:
      ** MatrixOffset = (Row * RowWidth) + Column
      ** (note: that the unit of the offset is number of reals)
      ***********************************************************/
      /* Element of Global Matrix, such as A, B, C */
      #define E(Matrix)   (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
      #define EA(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
      #define EB(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )

      /* FIXME - may pay to expand these out - got higher speed-ups below */
      /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
      E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );

      /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
      E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);

      /* S3 = A11 - A21 */
      E(S3) = EA(A11) - EA(A21);

      /* S7 = B22 - B12 */
      E(S7) = EB(B22) - EB(B12);

      TempMatrixOffset += sizeof(REAL);
      MatrixOffsetA += sizeof(REAL);
      MatrixOffsetB += sizeof(REAL);
    } /* end row loop*/

    MatrixOffsetA += RowIncrementA;
    MatrixOffsetB += RowIncrementB;
  } /* end column loop */

  /* The seven Strassen products, computed recursively and sequentially. */
  /* M2 = A11 x B11 */
  OptimizedStrassenMultiply_seq(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);

  /* M5 = S1 * S5 */
  OptimizedStrassenMultiply_seq(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of T1 = S2 x S6 + M2 */
  OptimizedStrassenMultiply_seq(T1sMULT, S2, S6,  QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of T2 = T1 + S3 x S7 */
  OptimizedStrassenMultiply_seq(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of C11 = M2 + A12 * B21 */
  OptimizedStrassenMultiply_seq(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);

  /* Step 1 of C12 = S4 x B22 + T1 + M5 */
  OptimizedStrassenMultiply_seq(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);

  /* Step 1 of C21 = T2 - A22 * S8 */
  OptimizedStrassenMultiply_seq(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  /* Final combination pass, 4 elements per iteration; mutates the local
   * quadrant/temporary pointers, which is safe because they are not used
   * after this loop. */
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column += 4) {
      REAL LocalM5_0 = *(M5);
      REAL LocalM5_1 = *(M5+1);
      REAL LocalM5_2 = *(M5+2);
      REAL LocalM5_3 = *(M5+3);
      REAL LocalM2_0 = *(M2);
      REAL LocalM2_1 = *(M2+1);
      REAL LocalM2_2 = *(M2+2);
      REAL LocalM2_3 = *(M2+3);
      REAL T1_0 = *(T1sMULT) + LocalM2_0;
      REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
      REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
      REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
      REAL T2_0 = *(C22) + T1_0;
      REAL T2_1 = *(C22+1) + T1_1;
      REAL T2_2 = *(C22+2) + T1_2;
      REAL T2_3 = *(C22+3) + T1_3;
      (*(C11))   += LocalM2_0;
      (*(C11+1)) += LocalM2_1;
      (*(C11+2)) += LocalM2_2;
      (*(C11+3)) += LocalM2_3;
      (*(C12))   += LocalM5_0 + T1_0;
      (*(C12+1)) += LocalM5_1 + T1_1;
      (*(C12+2)) += LocalM5_2 + T1_2;
      (*(C12+3)) += LocalM5_3 + T1_3;
      (*(C22))   = LocalM5_0 + T2_0;
      (*(C22+1)) = LocalM5_1 + T2_1;
      (*(C22+2)) = LocalM5_2 + T2_2;
      (*(C22+3)) = LocalM5_3 + T2_3;
      (*(C21 ))  = (- *(C21 )) + T2_0;
      (*(C21+1)) = (- *(C21+1)) + T2_1;
      (*(C21+2)) = (- *(C21+2)) + T2_2;
      (*(C21+3)) = (- *(C21+3)) + T2_3;
      M5 += 4;
      M2 += 4;
      T1sMULT += 4;
      C11 += 4;
      C12 += 4;
      C21 += 4;
      C22 += 4;
    }
    C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
    C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
    C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
    C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
  }
  /* Free the original (pre-alignment) allocation, not the bumped pointer. */
  free(StartHeap);
}
#if defined(IF_CUTOFF)
/* Parallel variant ("if" cutoff): each of the seven recursive products is
 * spawned as an OpenMP task whose creation is gated by
 * if (Depth < bots_cutoff_value); beyond the cutoff the tasks execute
 * immediately (undeferred).  Otherwise identical to the _seq version. */
void OptimizedStrassenMultiply_par(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
  unsigned QuadrantSize = MatrixSize >> 1;             /* MatixSize / 2 */
  unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize + 32;
  unsigned Column, Row;

  /************************************************************************
  ** For each matrix A, B, and C, we'll want pointers to each quandrant
  ** in the matrix. 
These quandrants will be addressed as follows:
  **  --        --
  **  | A11  A12 |
  **  |          |
  **  | A21  A22 |
  **  --        --
  ************************************************************************/
  REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
       *A21, *B21, *C21, *A22, *B22, *C22;

  REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
  #define T2sMULT C22
  #define NumberOfVariables 11

  PTR TempMatrixOffset = 0;
  PTR MatrixOffsetA = 0;
  PTR MatrixOffsetB = 0;

  char *Heap;
  void *StartHeap;

  /* Distance between the end of a matrix row and the start of the next row */
  PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
  PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
  PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;

  /* Recursion base case: hand small problems to the divide-and-conquer
   * multiplier in non-additive mode. */
  if (MatrixSize <= bots_app_cutoff_value) {
    MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
    return;
  }

  /* Initialize quandrant matrices */
  #define A11 A
  #define B11 B
  #define C11 C
  A12 = A11 + QuadrantSize;
  B12 = B11 + QuadrantSize;
  C12 = C11 + QuadrantSize;
  A21 = A + (RowWidthA * QuadrantSize);
  B21 = B + (RowWidthB * QuadrantSize);
  C21 = C + (RowWidthC * QuadrantSize);
  A22 = A21 + QuadrantSize;
  B22 = B21 + QuadrantSize;
  C22 = C21 + QuadrantSize;

  /* Allocate Heap Space Here */
  /* NOTE(review): malloc result unchecked -- TODO confirm policy. */
  StartHeap = Heap = malloc(QuadrantSizeInBytes * NumberOfVariables);
  /* ensure that heap is on cache boundary */
  if ( ((PTR) Heap) & 31)
     Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) );

  /* Distribute the heap space over the variables */
  S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {

    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column++) {

      /***********************************************************
      ** Within this loop, the following holds for MatrixOffset:
      ** MatrixOffset = (Row * RowWidth) + Column
      ** (note: that the unit of the offset is number of reals)
      ***********************************************************/
      /* Element of Global Matrix, such as A, B, C */
      #define E(Matrix)   (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
      #define EA(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
      #define EB(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )

      /* FIXME - may pay to expand these out - got higher speed-ups below */
      /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
      E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );

      /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
      E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);

      /* S3 = A11 - A21 */
      E(S3) = EA(A11) - EA(A21);

      /* S7 = B22 - B12 */
      E(S7) = EB(B22) - EB(B12);

      TempMatrixOffset += sizeof(REAL);
      MatrixOffsetA += sizeof(REAL);
      MatrixOffsetB += sizeof(REAL);
    } /* end row loop*/

    MatrixOffsetA += RowIncrementA;
    MatrixOffsetB += RowIncrementB;
  } /* end column loop */

  /* Seven independent products, each a task; the if clause makes tasks
   * undeferred once Depth reaches bots_cutoff_value. */
  /* M2 = A11 x B11 */
  #pragma omp task untied if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);

  /* M5 = S1 * S5 */
  #pragma omp task untied if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of T1 = S2 x S6 + M2 */
  #pragma omp task untied if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(T1sMULT, S2, S6,  QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of T2 = T1 + S3 x S7 */
  #pragma omp task untied if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of C11 = M2 + A12 * B21 */
  #pragma omp task untied if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);

  /* Step 1 of C12 = S4 x B22 + T1 + M5 */
  #pragma omp task untied if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);

  /* Step 1 of C21 = T2 - A22 * S8 */
  #pragma omp task untied if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);

  /**********************************************
  ** Synchronization Point
  **********************************************/
  #pragma omp taskwait

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column += 4) {
      REAL LocalM5_0 = *(M5);
      REAL LocalM5_1 = *(M5+1);
      REAL LocalM5_2 = *(M5+2);
      REAL LocalM5_3 = *(M5+3);
      REAL LocalM2_0 = *(M2);
      REAL LocalM2_1 = *(M2+1);
      REAL LocalM2_2 = *(M2+2);
      REAL LocalM2_3 = *(M2+3);
      REAL T1_0 = *(T1sMULT) + LocalM2_0;
      REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
      REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
      REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
      REAL T2_0 = *(C22) + T1_0;
      REAL T2_1 = *(C22+1) + T1_1;
      REAL T2_2 = *(C22+2) + T1_2;
      REAL T2_3 = *(C22+3) + T1_3;
      (*(C11))   += LocalM2_0;
      (*(C11+1)) += LocalM2_1;
      (*(C11+2)) += LocalM2_2;
      (*(C11+3)) += LocalM2_3;
      (*(C12))   += LocalM5_0 + T1_0;
      (*(C12+1)) += LocalM5_1 + T1_1;
      (*(C12+2)) += LocalM5_2 + T1_2;
      (*(C12+3)) += LocalM5_3 + T1_3;
      (*(C22))   = LocalM5_0 + T2_0;
      (*(C22+1)) = LocalM5_1 + T2_1;
      (*(C22+2)) = LocalM5_2 + T2_2;
      (*(C22+3)) = LocalM5_3 + T2_3;
      (*(C21 ))  = (- *(C21 )) + T2_0;
      (*(C21+1)) = (- *(C21+1)) + T2_1;
      (*(C21+2)) = (- *(C21+2)) + T2_2;
      (*(C21+3)) = (- *(C21+3)) + T2_3;
      M5 += 4;
      M2 += 4;
      T1sMULT += 4;
      C11 += 4;
      C12 += 4;
      C21 += 4;
      C22 += 4;
    }
    C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
    C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
    C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
    C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
  }
  free(StartHeap);
}
#elif defined(MANUAL_CUTOFF)
/* Parallel variant ("manual" cutoff): below bots_cutoff_value the seven
 * products are spawned as unconditional tasks followed by a taskwait;
 * at or beyond the cutoff they are called directly with no task overhead.
 * Otherwise identical to the _seq version. */
void OptimizedStrassenMultiply_par(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
  unsigned QuadrantSize = MatrixSize >> 1;             /* MatixSize / 2 */
  unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize + 32;
  unsigned Column, Row;

  /************************************************************************
  ** For each matrix A, B, and C, we'll want pointers to each quandrant
  ** in the matrix. 
These quandrants will be addressed as follows:
  **  --        --
  **  | A11  A12 |
  **  |          |
  **  | A21  A22 |
  **  --        --
  ************************************************************************/
  REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
       *A21, *B21, *C21, *A22, *B22, *C22;

  REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
  #define T2sMULT C22
  #define NumberOfVariables 11

  PTR TempMatrixOffset = 0;
  PTR MatrixOffsetA = 0;
  PTR MatrixOffsetB = 0;

  char *Heap;
  void *StartHeap;

  /* Distance between the end of a matrix row and the start of the next row */
  PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
  PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
  PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;

  /* Recursion base case: hand small problems to the divide-and-conquer
   * multiplier in non-additive mode. */
  if (MatrixSize <= bots_app_cutoff_value) {
    MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
    return;
  }

  /* Initialize quandrant matrices */
  #define A11 A
  #define B11 B
  #define C11 C
  A12 = A11 + QuadrantSize;
  B12 = B11 + QuadrantSize;
  C12 = C11 + QuadrantSize;
  A21 = A + (RowWidthA * QuadrantSize);
  B21 = B + (RowWidthB * QuadrantSize);
  C21 = C + (RowWidthC * QuadrantSize);
  A22 = A21 + QuadrantSize;
  B22 = B21 + QuadrantSize;
  C22 = C21 + QuadrantSize;

  /* Allocate Heap Space Here */
  /* NOTE(review): malloc result unchecked -- TODO confirm policy. */
  StartHeap = Heap = malloc(QuadrantSizeInBytes * NumberOfVariables);
  /* ensure that heap is on cache boundary */
  if ( ((PTR) Heap) & 31)
     Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) );

  /* Distribute the heap space over the variables */
  S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {

    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column++) {

      /***********************************************************
      ** Within this loop, the following holds for MatrixOffset:
      ** MatrixOffset = (Row * RowWidth) + Column
      ** (note: that the unit of the offset is number of reals)
      ***********************************************************/
      /* Element of Global Matrix, such as A, B, C */
      #define E(Matrix)   (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
      #define EA(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
      #define EB(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )

      /* FIXME - may pay to expand these out - got higher speed-ups below */
      /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
      E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );

      /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
      E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);

      /* S3 = A11 - A21 */
      E(S3) = EA(A11) - EA(A21);

      /* S7 = B22 - B12 */
      E(S7) = EB(B22) - EB(B12);

      TempMatrixOffset += sizeof(REAL);
      MatrixOffsetA += sizeof(REAL);
      MatrixOffsetB += sizeof(REAL);
    } /* end row loop*/

    MatrixOffsetA += RowIncrementA;
    MatrixOffsetB += RowIncrementB;
  } /* end column loop */

  /* Manual cutoff: spawn tasks only while Depth is below the cutoff;
   * otherwise fall through to plain recursive calls. */
  if (Depth < bots_cutoff_value) {
    /* M2 = A11 x B11 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);

    /* M5 = S1 * S5 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

    /* Step 1 of T1 = S2 x S6 + M2 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(T1sMULT, S2, S6,  QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

    /* Step 1 of T2 = T1 + S3 x S7 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);

    /* Step 1 of C11 = M2 + A12 * B21 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);

    /* Step 1 of C12 = S4 x B22 + T1 + M5 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);

    /* Step 1 of C21 = T2 - A22 * S8 */
    #pragma omp task untied
    OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);

    /**********************************************
    ** Synchronization Point
    **********************************************/
    #pragma omp taskwait
  } else {
    /* M2 = A11 x B11 */
    OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);

    /* M5 = S1 * S5 */
    OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

    /* Step 1 of T1 = S2 x S6 + M2 */
    OptimizedStrassenMultiply_par(T1sMULT, S2, S6,  QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

    /* Step 1 of T2 = T1 + S3 x S7 */
    OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);

    /* Step 1 of C11 = M2 + A12 * B21 */
    OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);

    /* Step 1 of C12 = S4 x B22 + T1 + M5 */
    OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);

    /* Step 1 of C21 = T2 - A22 * S8 */
    OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);
  }

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column += 4) {
      REAL LocalM5_0 = *(M5);
      REAL LocalM5_1 = *(M5+1);
      REAL LocalM5_2 = *(M5+2);
      REAL LocalM5_3 = *(M5+3);
      REAL LocalM2_0 = *(M2);
      REAL LocalM2_1 = *(M2+1);
      REAL LocalM2_2 = *(M2+2);
      REAL LocalM2_3 = *(M2+3);
      REAL T1_0 = *(T1sMULT) + LocalM2_0;
      REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
      REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
      REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
      REAL T2_0 = *(C22) + T1_0;
      REAL T2_1 = *(C22+1) + T1_1;
      REAL T2_2 = *(C22+2) + T1_2;
      REAL T2_3 = *(C22+3) + T1_3;
      (*(C11))   += LocalM2_0;
      (*(C11+1)) += LocalM2_1;
      (*(C11+2)) += LocalM2_2;
      (*(C11+3)) += LocalM2_3;
      (*(C12))   += LocalM5_0 + T1_0;
      (*(C12+1)) += LocalM5_1 + T1_1;
      (*(C12+2)) += LocalM5_2 + T1_2;
      (*(C12+3)) += LocalM5_3 + T1_3;
      (*(C22))   = LocalM5_0 + T2_0;
      (*(C22+1)) = LocalM5_1 + T2_1;
      (*(C22+2)) = LocalM5_2 + T2_2;
      (*(C22+3)) = LocalM5_3 + T2_3;
      (*(C21 ))  = (- *(C21 )) + T2_0;
      (*(C21+1)) = (- *(C21+1)) + T2_1;
      (*(C21+2)) = (- *(C21+2)) + T2_2;
      (*(C21+3)) = (- *(C21+3)) + T2_3;
      M5 += 4;
      M2 += 4;
      T1sMULT += 4;
      C11 += 4;
      C12 += 4;
      C21 += 4;
      C22 += 4;
    }
    C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
    C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
    C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
    C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
  }
  free(StartHeap);
}
#else
/* Parallel variant (no cutoff): every recursive product is always spawned
 * as an untied task, regardless of Depth.  Otherwise identical to the
 * _seq version. */
void OptimizedStrassenMultiply_par(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
  unsigned QuadrantSize = MatrixSize >> 1;             /* MatixSize / 2 */
  unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize + 32;
  unsigned Column, Row;

  /************************************************************************
  ** For each matrix A, B, and C, we'll want pointers to each quandrant
  ** in the matrix. These quandrants will be addressed as follows:
  **  --        --
  **  | A11  A12 |
  **  |          |
  **  | A21  A22 |
  **  --        --
  ************************************************************************/
  REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
       *A21, *B21, *C21, *A22, *B22, *C22;

  REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
  #define T2sMULT C22
  #define NumberOfVariables 11

  PTR TempMatrixOffset = 0;
  PTR MatrixOffsetA = 0;
  PTR MatrixOffsetB = 0;

  char *Heap;
  void *StartHeap;

  /* Distance between the end of a matrix row and the start of the next row */
  PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
  PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
  PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;

  /* Recursion base case: hand small problems to the divide-and-conquer
   * multiplier in non-additive mode. */
  if (MatrixSize <= bots_app_cutoff_value) {
    MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
    return;
  }

  /* Initialize quandrant matrices */
  #define A11 A
  #define B11 B
  #define C11 C
  A12 = A11 + QuadrantSize;
  B12 = B11 + QuadrantSize;
  C12 = C11 + QuadrantSize;
  A21 = A + (RowWidthA * QuadrantSize);
  B21 = B + (RowWidthB * QuadrantSize);
  C21 = C + (RowWidthC * QuadrantSize);
  A22 = A21 + QuadrantSize;
  B22 = B21 + QuadrantSize;
  C22 = C21 + QuadrantSize;

  /* Allocate Heap Space Here */
  /* NOTE(review): malloc result unchecked -- TODO confirm policy. */
  StartHeap = Heap = malloc(QuadrantSizeInBytes * NumberOfVariables);
  /* ensure that heap is on cache boundary */
  if ( ((PTR) Heap) & 31)
     Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 
31) );

  /* Distribute the heap space over the variables */
  S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {

    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column++) {

      /***********************************************************
      ** Within this loop, the following holds for MatrixOffset:
      ** MatrixOffset = (Row * RowWidth) + Column
      ** (note: that the unit of the offset is number of reals)
      ***********************************************************/
      /* Element of Global Matrix, such as A, B, C */
      #define E(Matrix)   (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
      #define EA(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
      #define EB(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )

      /* FIXME - may pay to expand these out - got higher speed-ups below */
      /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
      E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );

      /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
      E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);

      /* S3 = A11 - A21 */
      E(S3) = EA(A11) - EA(A21);

      /* S7 = B22 - B12 */
      E(S7) = EB(B22) - EB(B12);

      TempMatrixOffset += sizeof(REAL);
      MatrixOffsetA += sizeof(REAL);
      MatrixOffsetB += sizeof(REAL);
    } /* end row loop*/

    MatrixOffsetA += RowIncrementA;
    MatrixOffsetB += RowIncrementB;
  } /* end column loop */

  /* Seven independent products, each always spawned as a task. */
  /* M2 = A11 x B11 */
  #pragma omp task untied
  OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);

  /* M5 = S1 * S5 */
  #pragma omp task untied
  OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of T1 = S2 x S6 + M2 */
  #pragma omp task untied
  OptimizedStrassenMultiply_par(T1sMULT, S2, S6,  QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of T2 = T1 + S3 x S7 */
  #pragma omp task untied
  OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);

  /* Step 1 of C11 = M2 + A12 * B21 */
  #pragma omp task untied
  OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);

  /* Step 1 of C12 = S4 x B22 + T1 + M5 */
  #pragma omp task untied
  OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);

  /* Step 1 of C21 = T2 - A22 * S8 */
  #pragma omp task untied
  OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);

  /**********************************************
  ** Synchronization Point
  **********************************************/
  #pragma omp taskwait

  /***************************************************************************
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly througn memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column += 4) {
      REAL LocalM5_0 = *(M5);
      REAL LocalM5_1 = *(M5+1);
      REAL LocalM5_2 = *(M5+2);
      REAL LocalM5_3 = *(M5+3);
      REAL LocalM2_0 = *(M2);
      REAL LocalM2_1 = *(M2+1);
      REAL LocalM2_2 = *(M2+2);
      REAL LocalM2_3 = *(M2+3);
      REAL T1_0 = *(T1sMULT) + LocalM2_0;
      REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
      REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
      REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
      REAL T2_0 = *(C22) + T1_0;
      REAL T2_1 = *(C22+1) + T1_1;
      REAL T2_2 = *(C22+2) + T1_2;
      REAL T2_3 = *(C22+3) + T1_3;
      (*(C11))   += LocalM2_0;
      (*(C11+1)) += LocalM2_1;
      (*(C11+2)) += LocalM2_2;
      (*(C11+3)) += LocalM2_3;
      (*(C12))   += LocalM5_0 + T1_0;
      (*(C12+1)) += LocalM5_1 + T1_1;
      (*(C12+2)) += LocalM5_2 + T1_2;
      (*(C12+3)) += LocalM5_3 + T1_3;
      (*(C22))   = LocalM5_0 + T2_0;
      (*(C22+1)) = LocalM5_1 + T2_1;
      (*(C22+2)) = LocalM5_2 + T2_2;
      (*(C22+3)) = LocalM5_3 + T2_3;
      (*(C21 ))  = (- *(C21 )) + T2_0;
      (*(C21+1)) = (- *(C21+1)) + T2_1;
      (*(C21+2)) = (- *(C21+2)) + T2_2;
      (*(C21+3)) = (- *(C21+3)) + T2_3;
      M5 += 4;
      M2 += 4;
      T1sMULT += 4;
      C11 += 4;
      C12 += 4;
      C21 += 4;
      C22 += 4;
    }
    C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
    C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
    C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
    C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
  }
  free(StartHeap);
}
#endif
/*
 * Set an n by n matrix A to random values. The distance between
 * rows is an
 */
/* Fills the top-left n x n block of A (row stride an) with uniform values
 * in [0, 1] via rand(); ELEM is a project macro indexing A with stride an
 * -- defined elsewhere (TODO confirm). */
void init_matrix(int n, REAL *A, int an)
{
     int i, j;

     for (i = 0; i < n; ++i)
          for (j = 0; j < n; ++j)
               ELEM(A, an, i, j) = ((double) rand()) / (double) RAND_MAX;
}
/*
 * Compare two matrices. 
Print an error message if they differ by
 * more than EPSILON.
 */
int compare_matrix(int n, REAL *A, int an, REAL *B, int bn)
{
    int i, j;
    REAL c;

    for (i = 0; i < n; ++i)
        for (j = 0; j < n; ++j) {
            /* compute the relative error c */
            c = ELEM(A, an, i, j) - ELEM(B, bn, i, j);
            if (c < 0.0)
                c = -c;
            /* NOTE(review): relative error divides by A's element with no
             * zero guard; presumably safe because init_matrix fills A with
             * rand()/RAND_MAX values — TODO confirm for other inputs. */
            c = c / ELEM(A, an, i, j);
            if (c > EPSILON) {
                bots_message("Strassen: Wrong answer!\n");
                return BOTS_RESULT_UNSUCCESSFUL;
            }
        }
    return BOTS_RESULT_SUCCESSFUL;
}

/*
 * Allocate a matrix of side n (therefore n^2 elements)
 */
REAL *alloc_matrix(int n)
{
    /* NOTE(review): malloc result is returned unchecked; callers are
     * responsible for handling a NULL return. */
    return malloc(n * n * sizeof(REAL));
}

/*
 * Parallel Strassen driver: C = A * B for n x n matrices with row
 * stride n.  Spawns one untied root task inside a parallel region;
 * the recursive multiply creates further tasks itself.
 */
void strassen_main_par(REAL *A, REAL *B, REAL *C, int n)
{
    bots_message("Computing parallel Strassen algorithm (n=%d) ", n);
#pragma omp parallel
#pragma omp single
#pragma omp task untied
    OptimizedStrassenMultiply_par(C, A, B, n, n, n, n, 1);
    bots_message(" completed!\n");
}

/*
 * Sequential Strassen driver: C = A * B for n x n matrices with row
 * stride n.  Same contract as strassen_main_par, no tasking.
 */
void strassen_main_seq(REAL *A, REAL *B, REAL *C, int n)
{
    bots_message("Computing sequential Strassen algorithm (n=%d) ", n);
    OptimizedStrassenMultiply_seq(C, A, B, n, n, n, n, 1);
    bots_message(" completed!\n");
}
9431.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ for (i = 0; i < _PB_N; i++) { #pragma omp parallel for simd num_threads(8) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp parallel for simd num_threads(8) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
questao03.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"

/*
 * Reads a non-negative integer `entrada`, builds an
 * (entrada+1) x (entrada+1) matrix whose first row and column hold
 * random values and whose interior cells are the sum of the cell above
 * and the cell to the left, then sums the interior cells both
 * sequentially and in parallel (OpenMP) and prints both sums and
 * timings so they can be compared.
 */
int main() {
    long int **matriz;
    long int sum_s = 0, sum_p = 0;
    int entrada, i, j;
    double t1_s, t2_s, t1_p, t2_p;

    srand(time(0));

    printf("Informe um numero inteiro positivo: ");
    scanf("%d", &entrada);

    if(entrada < 0) {
        printf("\nNumero invalido");
        return 1;
    }

    /* BUG FIX: indices run from 0 to `entrada` inclusive, so
     * (entrada + 1) rows/columns are required; the original allocated
     * only `entrada` of each and also sized the row-pointer array with
     * sizeof(long int) instead of sizeof(long int *), causing heap
     * overflows.  Allocations are now checked as well. */
    matriz = (long int**)malloc((entrada + 1) * sizeof(long int*));
    if(matriz == NULL) {
        printf("\nFalha de alocacao");
        return 1;
    }
    for(i = 0; i <= entrada; i++) {
        matriz[i] = (long int*)malloc((entrada + 1) * sizeof(long int));
        if(matriz[i] == NULL) {
            printf("\nFalha de alocacao");
            return 1;
        }
    }

    /* Seed the first row and the first column with random values. */
    for(i = 0; i <= entrada; i++) {
        matriz[i][0] = abs(rand());
        matriz[0][i] = abs(rand());
    }

    /* Each interior cell is the sum of its upper and left neighbours. */
    for(i = 1; i <= entrada; i++) {
        for(j = 1; j <= entrada; j++) {
            matriz[i][j] = matriz[i - 1][j] + matriz[i][j - 1];
        }
    }

    /* Sequential sum over the interior cells (rows/cols 1..entrada). */
    t1_s = omp_get_wtime();
    for(i = 1; i <= entrada; i++) {
        for(j = 1; j <= entrada; j++) {
            sum_s += matriz[i][j];
        }
    }
    t2_s = omp_get_wtime();

    /* Parallel sum.
     * BUG FIX: the original started at index 0, so it also summed the
     * random border and could never match the sequential result; it
     * also left the inner index `j` shared across threads — a data
     * race.  Both loop indices are now private to each thread. */
    t1_p = omp_get_wtime();
    #pragma omp parallel
    {
        long int local_sum = 0;
        int pi, pj;
        #pragma omp for
        for(pi = 1; pi <= entrada; pi++) {
            for(pj = 1; pj <= entrada; pj++) {
                local_sum += matriz[pi][pj];
            }
        }
        /* Combine per-thread partial sums one thread at a time. */
        #pragma omp critical
        {
            sum_p += local_sum;
        }
    }
    t2_p = omp_get_wtime();

    printf("\nSequencial");
    printf("\nSoma : %ld", sum_s);
    printf("\nTempo: %lf\n", t2_s - t1_s);

    printf("\nParalelo");
    printf("\nSoma : %ld", sum_p);
    printf("\nTempo: %lf\n", t2_p - t1_p);

    for(i = 0; i <= entrada; i++) {
        free(matriz[i]);
    }
    free(matriz);

    return 0;
}
GrB_Matrix_nrows.c
//------------------------------------------------------------------------------ // GrB_Matrix_nrows: number of rows of a sparse matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GrB_Matrix_nrows // get the number of rows of a matrix ( GrB_Index *nrows, // matrix has nrows rows const GrB_Matrix A // matrix to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Matrix_nrows (&nrows, A)") ; GB_RETURN_IF_NULL (nrows) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; //-------------------------------------------------------------------------- // get the number of rows //-------------------------------------------------------------------------- (*nrows) = GB_NROWS (A) ; #pragma omp flush return (GrB_SUCCESS) ; }
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
% % The format of the CompositeImage method is: % % MagickBooleanType CompositeImage(Image *image, % const Image *source_image,const CompositeOperator compose, % const MagickBooleanType clip_to_self,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o source_image: the source image. % % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o clip_to_self: set to MagickTrue to limit composition to area composed. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o exception: return any errors or warnings in this structure. % */ /* Composition based on the SVG specification: A Composition is defined by... Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc) Y = 1 for source preserved Z = 1 for canvas preserved Conversion to transparency (then optimized) Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) Where... Sca = Sc*Sa normalized Source color divided by Source alpha Dca = Dc*Da normalized Dest color divided by Dest alpha Dc' = Dca'/Da' the desired color value for this channel. Da' in in the follow formula as 'gamma' The resulting alpla value. Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in the following optimizations... 
gamma = Sa+Da-Sa*Da; gamma = 1 - QuantumScale*alpha * QuantumScale*beta; opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma The above SVG definitions also define that Mathematical Composition methods should use a 'Over' blending mode for Alpha Channel. It however was not applied for composition modes of 'Plus', 'Minus', the modulus versions of 'Add' and 'Subtract'. Mathematical operator changes to be applied from IM v6.7... 1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed 'ModulusAdd' and 'ModulusSubtract' for clarity. 2) All mathematical compositions work as per the SVG specification with regard to blending. This now includes 'ModulusAdd' and 'ModulusSubtract'. 3) When the special channel flag 'sync' (syncronize channel updates) is turned off (enabled by default) then mathematical compositions are only performed on the channels specified, and are applied independantally of each other. In other words the mathematics is performed as 'pure' mathematical operations, rather than as image operations. */ static void HCLComposite(const MagickRealType hue,const MagickRealType chroma, const MagickRealType luma,MagickRealType *red,MagickRealType *green, MagickRealType *blue) { MagickRealType b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma, MagickRealType *luma) { MagickRealType b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (MagickRealType *) NULL); assert(chroma != (MagickRealType *) NULL); assert(luma != (MagickRealType *) NULL); r=red; g=green; b=blue; max=MagickMax(r,MagickMax(g,b)); c=max-(MagickRealType) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == max) h=fmod((g-b)/c+6.0,6.0); else if (green == max) h=((b-r)/c)+2.0; else if (blue == max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static MagickBooleanType CompositeOverImage(Image *image, const Image *source_image,const MagickBooleanType clip_to_self, const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *image_view, *source_view; const char *value; MagickBooleanType clamp, status; MagickOffsetType progress; ssize_t y; /* Composite image. 
*/ status=MagickTrue; progress=0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. */ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); alpha=Sa+Da-Sa*Da; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ pixel=QuantumRange*alpha; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Sc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; gamma=PerceptibleReciprocal(alpha); pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); q[i]=clamp != MagickFalse ? 
ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType CompositeImage(Image *image, const Image *composite,const CompositeOperator compose, const MagickBooleanType clip_to_self,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *source_view, *image_view; const char *value; GeometryInfo geometry_info; Image *canvas_image, *source_image; MagickBooleanType clamp, status; MagickOffsetType progress; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); if (IsGrayColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); (void) SetImageColorspace(source_image,image->colorspace,exception); if ((compose == 
OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; if ((source_image->alpha_trait == UndefinedPixelTrait) && (image->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception); status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++) { PixelChannel channel = 
GetPixelChannelChannel(source_image,i); PixelTrait source_traits = GetPixelChannelTraits(source_image, channel); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((source_traits == UndefinedPixelTrait) || (traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); 
q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp != MagickFalse ? ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; MagickRealType angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. 
*/ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* do the variable blurring of each pixel in image */ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs((double) angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { (void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1, blur.x2,blur.y1, blur.y2); (void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale* GetPixelRed(p),QuantumScale*GetPixelGreen(p)); #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if 
(sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); status=InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. 
Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, DcaDa, Sa, SaSca, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case LightenCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case MultiplyCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ScreenCompositeOp: case SoftLightCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } case ModulusAddCompositeOp: { if ((Sa+Da) <= 1.0) { alpha=(Sa+Da); break; } alpha=((Sa+Da)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sa-Da) >= 0.0) { alpha=(Sa-Da); break; } alpha=((Sa-Da)+1.0); break; } default: { alpha=1.0; break; } } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: 
break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits = GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((channel == AlphaPixelChannel) && ((traits & UpdatePixelTrait) != 0)) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { 
pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case DifferenceCompositeOp: { pixel=QuantumRange*fabs(Sa-Da); break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } case MultiplyCompositeOp: { pixel=QuantumRange*Sa*Da; break; } case StereoCompositeOp: { pixel=QuantumRange*(Sa+Da)/2; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } if (source_traits == UndefinedPixelTrait) continue; /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Dc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. 
*/ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; SaSca=Sa*PerceptibleReciprocal(Sca); DcaDa=Dca*PerceptibleReciprocal(Da); switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case BlurCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)* SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, 
&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) GetPixelBlack(source_image,p); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*gamma*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { if ((Sca+Dca) <= 1.0) { pixel=QuantumRange*(Sca+Dca); break; } pixel=QuantumRange*((Sca+Dca)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sca-Dca) >= 0.0) { pixel=QuantumRange*(Sca-Dca); break; } pixel=QuantumRange*((Sca-Dca)+1.0); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*Sca; break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa* (4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)- DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case StereoCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; 
} case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)* PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0* (Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly 
tiles the texture image across and down the image
%  canvas.
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    The image must be DirectClass so pixels can be written directly; a local
    clone of the texture is made so its colorspace and virtual pixel method
    can be changed without touching the caller's image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  /*
    TileVirtualPixelMethod makes out-of-bounds reads of the texture wrap
    around, so the fast path below can fetch one pre-tiled row at a time.
  */
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    Two strategies: any compose operator other than a plain opaque copy
    (Copy, or Over with no alpha on either image) requires full per-pixel
    blending, so each tile is composited with CompositeImage().  Otherwise
    the optimized path further below copies texture pixels directly.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
          texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /*
            tile_offset shifts the tiling origin; CompositeImage clips any
            tile that overhangs the image edge.
          */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /*
        Final progress tick to report 100% completion, then release the
        texture clone.
      */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row, already wrapped horizontally by the tile
      virtual-pixel method; the modulo selects the matching texture row for
      this image row.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
      texture_image->columns)
    {
      register ssize_t
        j;

      /*
        Replay the cached texture row for each horizontal tile; the last
        (partial) tile is clipped to the remaining image width.
      */
      p=pixels;
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /*
          Copy channel-by-channel, skipping channels undefined in either
          image so channel maps need not match exactly.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress is reported from parallel iterations
           without an atomic, unlike the composite loop above — presumably
           SetImageProgress is internally safe; confirm before changing. */
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 16; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1);t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(32*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(16*t1+Nx+29,256)),floord(32*t2+Nx+28,256)),floord(16*t3+Nx+12,256)),floord(32*t1-32*t2+Nz+Nx+27,256));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),256*t4+254),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, 
"constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_unop__identity_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc32_fc32
// op(A') function:  GB_unop_tran__identity_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = aij

// NOTE: the GB_* macros below are not only used locally; they parameterize
// the generic template GB_unop_transpose.c, which is textually #include'd
// into GB_unop_tran__identity_fc32_fc32 at the bottom of this file.  Their
// exact token sequence must be preserved.

// A's element type
#define GB_ATYPE \
    GxB_FC32_t

// C's element type
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// access entry p of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input)
#define GB_OP(z, x) \
    z = x ;

// casting (no-op: A and C share the type GxB_FC32_t)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (enables the memcpy fast path in the apply kernel below)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    1

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator to all anz entries of Ax, writing into Cx.
// Returns GrB_NO_VALUE when this specialization is compiled out
// (GB_DISABLE), otherwise GrB_SUCCESS.

GrB_Info GB_unop_apply__identity_fc32_fc32
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity with no typecast: a parallel memcpy replaces the loop
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are absent and skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire kernel body lives in the template GB_unop_transpose.c, which
// expands using the GB_* macros defined above.  Workspaces/A_slice appear to
// be per-thread partitioning data produced by the caller — see the template
// for their exact contract (not visible in this file).

GrB_Info GB_unop_tran__identity_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/ASTFwd.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. 
SourceLocation EndLoc; /// Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. 
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed for by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. 
SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. 
void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. 
void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocator; } }; /// This represents the 'align' clause in the '#pragma omp allocate' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) align(8) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc' and align clause with /// value of 8. class OMPAlignClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Alignment specified with align clause. Stmt *Alignment = nullptr; /// Set alignment value. void setAlignment(Expr *A) { Alignment = A; } /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Build 'align' clause with the given alignment /// /// \param A Alignment value. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAlignClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_align, StartLoc, EndLoc), LParenLoc(LParenLoc), Alignment(A) {} /// Build an empty clause. OMPAlignClause() : OMPClause(llvm::omp::OMPC_align, SourceLocation(), SourceLocation()) {} public: /// Build 'align' clause with the given alignment /// /// \param A Alignment value. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPAlignClause *Create(const ASTContext &C, Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns alignment Expr *getAlignment() const { return cast_or_null<Expr>(Alignment); } child_range children() { return child_range(&Alignment, &Alignment + 1); } const_child_range children() const { return const_child_range(&Alignment, &Alignment + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_align; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. 
class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. 
static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. 
void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. 
Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPIfClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPFinalClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPFinalClause() : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPFinalClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. 
/// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. 
/// The parameter of the safelen clause must be a constant positive integer
/// expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_safelen;
  }
};

/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently.
/// The parameter of the 'simdlen' clause must be a constant positive integer
/// expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simdlen;
  }
};

/// This represents the 'sizes' clause in the '#pragma omp tile' directive.
/// /// \code /// #pragma omp tile sizes(5,5) /// for (int i = 0; i < 64; ++i) /// for (int j = 0; j < 64; ++j) /// \endcode class OMPSizesClause final : public OMPClause, private llvm::TrailingObjects<OMPSizesClause, Expr *> { friend class OMPClauseReader; friend class llvm::TrailingObjects<OMPSizesClause, Expr *>; /// Location of '('. SourceLocation LParenLoc; /// Number of tile sizes in the clause. unsigned NumSizes; /// Build an empty clause. explicit OMPSizesClause(int NumSizes) : OMPClause(llvm::omp::OMPC_sizes, SourceLocation(), SourceLocation()), NumSizes(NumSizes) {} public: /// Build a 'sizes' AST node. /// /// \param C Context of the AST. /// \param StartLoc Location of the 'sizes' identifier. /// \param LParenLoc Location of '('. /// \param EndLoc Location of ')'. /// \param Sizes Content of the clause. static OMPSizesClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Sizes); /// Build an empty 'sizes' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumSizes Number of items in the clause. static OMPSizesClause *CreateEmpty(const ASTContext &C, unsigned NumSizes); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the number of list items. unsigned getNumSizes() const { return NumSizes; } /// Returns the tile size expressions. MutableArrayRef<Expr *> getSizesRefs() { return MutableArrayRef<Expr *>(static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } ArrayRef<Expr *> getSizesRefs() const { return ArrayRef<Expr *>(static_cast<const OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } /// Sets the tile size expressions. 
  void setSizesRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumSizes);
    std::copy(VL.begin(), VL.end(),
              static_cast<OMPSizesClause *>(this)
                  ->template getTrailingObjects<Expr *>());
  }

  child_range children() {
    MutableArrayRef<Expr *> Sizes = getSizesRefs();
    return child_range(reinterpret_cast<Stmt **>(Sizes.begin()),
                       reinterpret_cast<Stmt **>(Sizes.end()));
  }
  const_child_range children() const {
    ArrayRef<Expr *> Sizes = getSizesRefs();
    return const_child_range(reinterpret_cast<Stmt *const *>(Sizes.begin()),
                             reinterpret_cast<Stmt *const *>(Sizes.end()));
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_sizes;
  }
};

/// Representation of the 'full' clause of the '#pragma omp unroll' directive.
///
/// \code
/// #pragma omp unroll full
/// for (int i = 0; i < 64; ++i)
/// \endcode
class OMPFullClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Build an empty clause.
  explicit OMPFullClause() : OMPClause(llvm::omp::OMPC_full, {}, {}) {}

public:
  /// Build an AST node for a 'full' clause.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPFullClause *Create(const ASTContext &C, SourceLocation StartLoc,
                               SourceLocation EndLoc);

  /// Build an empty 'full' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  static OMPFullClause *CreateEmpty(const ASTContext &C);

  child_range children() { return {child_iterator(), child_iterator()}; }
  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_full;
  }
};

/// Representation of the 'partial' clause of the '#pragma omp unroll'
/// directive.
///
/// \code
/// #pragma omp unroll partial(4)
/// for (int i = start; i < end; ++i)
/// \endcode
class OMPPartialClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Optional argument to the clause (unroll factor).
  /// NOTE(review): unlike sibling clauses this member has no `= nullptr`
  /// initializer; the empty constructor leaves it indeterminate until the
  /// reader calls setFactor() — confirm that is always the case.
  Stmt *Factor;

  /// Build an empty clause.
  explicit OMPPartialClause() : OMPClause(llvm::omp::OMPC_partial, {}, {}) {}

  /// Set the unroll factor.
  void setFactor(Expr *E) { Factor = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build an AST node for a 'partial' clause.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Location of the 'partial' identifier.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Location of ')'.
  /// \param Factor Clause argument.
  static OMPPartialClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, Expr *Factor);

  /// Build an empty 'partial' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  static OMPPartialClause *CreateEmpty(const ASTContext &C);

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the argument of the clause or nullptr if not set.
  Expr *getFactor() const { return cast_or_null<Expr>(Factor); }

  child_range children() { return child_range(&Factor, &Factor + 1); }
  const_child_range children() const {
    return const_child_range(&Factor, &Factor + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_partial;
  }
};

/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num) {}

  /// Build an empty clause.
  explicit OMPCollapseClause()
      : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_collapse;
  }
};

/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'default' clause.
  llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPDefaultClause()
      : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  llvm::omp::DefaultKind getDefaultKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_default;
  }
};

/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'proc_bind' clause.
  llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPProcBindClause()
      : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  llvm::omp::ProcBindKind getProcBindKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_proc_bind;
  }
};

/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'unified_address' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedAddressClause()
      : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_unified_address;
  }
};

/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'unified_shared_memory' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedSharedMemoryClause()
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory;
  }
};

/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'reverse_offload' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReverseOffloadClause()
      : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reverse_offload;
  }
};

/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
/// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. OMPDynamicAllocatorsClause() : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. /// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '(' SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. OpenMPAtomicDefaultMemOrderClauseKind Kind = OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. 
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum { FIRST, SECOND, NUM_MODIFIERS };
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set a schedule modifier, filling the first unused slot; asserts if both
  /// slots are already occupied.
  ///
  /// \param M Schedule modifier.
  /// NOTE(review): the identifier is missing an 'i' ("Modifer"); renaming it
  /// would require updating all callers, so only the doc is corrected here.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier.
  /// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. 
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_schedule;
  }
};

/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Real number of loops.
  unsigned NumberOfLoops = 0;

  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}

  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}

  /// Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPOrderedClause *Create(const ASTContext &C, Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty clause. static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } /// Set number of iterations for the specified loop. void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations); /// Get number of iterations for all the loops. ArrayRef<Expr *> getLoopNumIterations() const; /// Set loop counter for the specified loop. void setLoopCounter(unsigned NumLoop, Expr *Counter); /// Get loops counter for the specified loop. Expr *getLoopCounter(unsigned NumLoop); const Expr *getLoopCounter(unsigned NumLoop) const; child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_ordered; } }; /// This represents 'nowait' clause in the '#pragma omp ...' directive. 
/// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. class OMPNowaitClause : public OMPClause { public: /// Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {} /// Build an empty clause. OMPNowaitClause() : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nowait; } }; /// This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. class OMPUntiedClause : public OMPClause { public: /// Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPUntiedClause() : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. OMPMergeableClause() : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_mergeable; } }; /// This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. 
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause()
      : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}

  // 'read' carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_write;
  }
};

/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
/// dependence kind.
class OMPUpdateClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
                                    OpenMPDependClauseKind> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// true if extended version of the clause for 'depobj' directive.
  bool IsExtended = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
    // 2 locations: for '(' and argument location.
    return IsExtended ? 2 : 0;
  }

  /// Sets the location of '(' in clause for 'depobj' directive.
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }

  /// Sets the location of the argument in clause for 'depobj' directive.
  void setArgumentLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
  }

  /// Sets the dependence kind for the clause for 'depobj' directive.
  void setDependencyKind(OpenMPDependClauseKind DK) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<OpenMPDependClauseKind>() = DK;
  }

  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
                  bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
        IsExtended(IsExtended) {}

  /// Build an empty clause.
  OMPUpdateClause(bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
        IsExtended(IsExtended) {}

public:
  /// Creates clause for 'atomic' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc);

  /// Creates clause for 'depobj' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ArgumentLoc Location of the argument.
  /// \param DK Dependence kind.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ArgumentLoc,
                                 OpenMPDependClauseKind DK,
                                 SourceLocation EndLoc);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param IsExtended true if extended clause for 'depobj' directive must be
  /// created.
  static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);

  /// Checks if the clause is the extended clause for 'depobj' directive.
  bool isExtended() const { return IsExtended; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }

  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }

  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(llvm::omp::OMPC_capture, SourceLocation(),
                  SourceLocation()) {}

  // 'capture' carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_capture;
  }
};

/// This represents 'compare' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic compare
/// \endcode
/// In this example directive '#pragma omp atomic' has 'compare' clause.
class OMPCompareClause final : public OMPClause {
public:
  /// Build 'compare' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_compare, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCompareClause()
      : OMPClause(llvm::omp::OMPC_compare, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_compare;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
  }
};

/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};

/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcquireClause()
      : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acquire;
  }
};

/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build 'release' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReleaseClause()
      : OMPClause(llvm::omp::OMPC_release, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_release;
  }
};

/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build 'relaxed' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPRelaxedClause()
      : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(),
                  SourceLocation()) {}

  // 'relaxed' carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_relaxed;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
                                           LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate,
                                                StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
SourceLocation LPKindLoc; /// Optional colon location, if specified by user. SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Get the list of helper expressions for initialization of private /// copies for lastprivate variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. 
These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } /// Sets lastprivate kind. void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; } /// Sets location of the lastprivate kind. void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; } /// Sets colon symbol location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. 
This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// \param LPKind Lastprivate kind, e.g. 'conditional'. /// \param LPKindLoc Location of the lastprivate kind. /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); /// Lastprivate kind. OpenMPLastprivateModifier getKind() const { return LPKind; } /// Returns the location of the lastprivate kind. SourceLocation getKindLoc() const { return LPKindLoc; } /// Returns the location of the ':' symbol, if any. SourceLocation getColonLoc() const { return ColonLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; /// Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. 
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLastprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_lastprivate; } }; /// This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. 
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPSharedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_shared;
  }
};

/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Reduction modifier.
  OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown;

  /// Reduction modifier location.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ModifierLoc, SourceLocation ColonLoc,
                     SourceLocation EndLoc,
                     OpenMPReductionClauseModifier Modifier, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets reduction modifier.
  void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; }

  /// Sets location of the modifier.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper copy operations for inscan reductions.
  /// The form is: Temps[i] = LHS[i];
  void setInscanCopyOps(ArrayRef<Expr *> Ops);

  /// Get the list of helper inscan copy operations.
  MutableArrayRef<Expr *> getInscanCopyOps() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyOps() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

  /// Set list of helper temp vars for inscan copy array operations.
  void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);

  /// Get the list of helper inscan copy temps.
  MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
    return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
    return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
  }

  /// Set list of helper temp elements vars for inscan copy array operations.
  void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);

  /// Get the list of helper inscan copy array elements.
  MutableArrayRef<Expr *> getInscanCopyArrayElems() {
    return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayElems() const {
    return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param CopyOps List of copy operations for inscan reductions:
  /// \code
  /// TempExprs = LHSExprs;
  /// \endcode
  /// \param CopyArrayTemps Temp arrays for prefix sums.
  /// \param CopyArrayElems Temp arrays for prefix sums.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ModifierLoc, SourceLocation ColonLoc,
         SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier,
         ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
         ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param Modifier Reduction modifier.
  static OMPReductionClause *
  CreateEmpty(const ASTContext &C, unsigned N,
              OpenMPReductionClauseModifier Modifier);

  /// Returns modifier.
  OpenMPReductionClauseModifier getModifier() const { return Modifier; }

  /// Returns modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range copy_ops() const {
    return helper_expr_const_range(getInscanCopyOps().begin(),
                                   getInscanCopyOps().end());
  }

  helper_expr_range copy_ops() {
    return helper_expr_range(getInscanCopyOps().begin(),
                             getInscanCopyOps().end());
  }

  helper_expr_const_range copy_array_temps()
const {
    return helper_expr_const_range(getInscanCopyArrayTemps().begin(),
                                   getInscanCopyArrayTemps().end());
  }

  helper_expr_range copy_array_temps() {
    return helper_expr_range(getInscanCopyArrayTemps().begin(),
                             getInscanCopyArrayTemps().end());
  }

  helper_expr_const_range copy_array_elems() const {
    return helper_expr_const_range(getInscanCopyArrayElems().begin(),
                                   getInscanCopyArrayElems().end());
  }

  helper_expr_range copy_array_elems() {
    return helper_expr_range(getInscanCopyArrayElems().begin(),
                             getInscanCopyArrayElems().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(),
                             const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. 
These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper reduction taskgroup descriptors. void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction taskgroup descriptors. MutableArrayRef<Expr *> getTaskgroupDescriptors() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getTaskgroupDescriptors() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. 
/// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param TaskgroupDescriptors List of helper taskgroup descriptors for /// corresponding items in parent taskgroup task_reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPInReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range taskgroup_descriptors() const { return helper_expr_const_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } helper_expr_range taskgroup_descriptors() { return helper_expr_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range 
used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_in_reduction; } }; /// This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. class OMPLinearClause final : public OMPVarListClause<OMPLinearClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLinearClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Modifier of 'linear' clause. OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val; /// Location of linear modifier if any. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. 
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed
  /// by NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  ///   Finals[]; Step; CalcStep; }
  ///
  /// NOTE(review): getUsedExprs() below reads varlist_size() + 1 expressions
  /// starting at getFinals().end() + 2 (i.e. just past CalcStep), which
  /// implies an additional UsedExprs[] list follows CalcStep that this layout
  /// comment does not mention — confirm against the allocation in Create().
  MutableArrayRef<Expr *> getPrivates() {
    // Privates[] is stored immediately after the variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Gets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Gets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Gets the list of used expressions for linear variables.
  MutableArrayRef<Expr *> getUsedExprs() {
    // +2 skips the Step and CalcStep slots stored after Finals[]; the list
    // holds varlist_size() + 1 entries.
    return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
  }
  ArrayRef<const Expr *> getUsedExprs() const {
    return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; } /// Return modifier. OpenMPLinearClauseKind getModifier() const { return Modifier; } /// Set modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Return modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// Sets the list of final update expressions for linear variables. /// \param FL List of expressions. void setFinals(ArrayRef<Expr *> FL); /// Sets the list of used expressions for the linear clause. 
void setUsedExprs(ArrayRef<Expr *> UE); using privates_iterator = MutableArrayRef<Expr *>::iterator; using privates_const_iterator = ArrayRef<const Expr *>::iterator; using privates_range = llvm::iterator_range<privates_iterator>; using privates_const_range = llvm::iterator_range<privates_const_iterator>; privates_range privates() { return privates_range(getPrivates().begin(), getPrivates().end()); } privates_const_range privates() const { return privates_const_range(getPrivates().begin(), getPrivates().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } using updates_iterator = MutableArrayRef<Expr *>::iterator; using updates_const_iterator = ArrayRef<const Expr *>::iterator; using updates_range = llvm::iterator_range<updates_iterator>; using updates_const_range = llvm::iterator_range<updates_const_iterator>; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } using finals_iterator = MutableArrayRef<Expr *>::iterator; using finals_const_iterator = ArrayRef<const Expr *>::iterator; using finals_range = llvm::iterator_range<finals_iterator>; using finals_const_range = llvm::iterator_range<finals_const_iterator>; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } using used_expressions_iterator = MutableArrayRef<Expr *>::iterator; using used_expressions_const_iterator = ArrayRef<const Expr 
*>::iterator; using used_expressions_range = llvm::iterator_range<used_expressions_iterator>; using used_expressions_const_range = llvm::iterator_range<used_expressions_const_iterator>; used_expressions_range used_expressions() { return finals_range(getUsedExprs().begin(), getUsedExprs().end()); } used_expressions_const_range used_expressions() const { return finals_const_range(getUsedExprs().begin(), getUsedExprs().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLinearClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPLinearClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_linear; } }; /// This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. class OMPAlignedClause final : public OMPVarListClause<OMPAlignedClause>, private llvm::TrailingObjects<OMPAlignedClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. 
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
                                           LParenLoc, EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment expression (a single Expr* slot tail-allocated
  /// immediately after the variable list; see setAlignment()).
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAlignedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_aligned; } }; /// This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. class OMPCopyinClause final : public OMPVarListClause<OMPCopyinClause>, private llvm::TrailingObjects<OMPCopyinClause, Expr *> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions. Stored as the first
  /// tail-allocated helper array, directly after the variable list.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (second helper array,
  /// directly after the source expressions).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (third helper array,
  /// directly after the destination expressions).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPCopyinClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_copyin; } }; /// This represents clause 'copyprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp single copyprivate(a,b) /// \endcode /// In this example directive '#pragma omp single' has clause 'copyprivate' /// with the variables 'a' and 'b'. 
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  // Like OMPCopyinClause, this class has 3 additional tail-allocated arrays
  // after the variable list:
  // 1. Helper source expressions for the generated assignment.
  // 2. Helper destination expressions for the generated assignment.
  // 3. Helper assignment expressions (DstExprs = SrcExprs) used by codegen.
  // The getters below compute offsets into that tail storage, so their order
  // mirrors the storage layout.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
                                               StartLoc, LParenLoc, EndLoc,
                                               N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions (first tail array, directly
  /// after the variable list).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (second tail array).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (third tail array).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. class OMPFlushClause final : public OMPVarListClause<OMPFlushClause>, private llvm::TrailingObjects<OMPFlushClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// The depobj expression associated with the clause.
  Expr *Depobj = nullptr;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  ///
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the depobj expression.
  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Creates clause.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Depobj depobj expression associated with the 'depobj' directive.
  static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *Depobj);

  /// Creates an empty clause.
  ///
  /// \param C AST context.
  static OMPDepobjClause *CreateEmpty(const ASTContext &C);

  /// Returns depobj expression associated with the clause.
  Expr *getDepobj() { return Depobj; }
  const Expr *getDepobj() const { return Depobj; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&Depobj),
                       reinterpret_cast<Stmt **>(&Depobj) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDepobjClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depobj;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
                                          LParenLoc, EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  explicit OMPDependClause(unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N),
        NumLoops(NumLoops) {}

  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Sets optional dependency modifier.
  void setModifier(Expr *DepModifier);

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *DepModifier,
                                 OpenMPDependClauseKind DepKind,
                                 SourceLocation DepLoc, SourceLocation ColonLoc,
                                 ArrayRef<Expr *> VL, unsigned NumLoops);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Return optional depend modifier.
  Expr *getModifier();
  const Expr *getModifier() const {
    return const_cast<OMPDependClause *>(this)->getModifier();
  }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDependClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depend;
  }
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device clause modifier.
  OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

  /// Sets modifier.
  void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }

  /// Sets modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

public:
  /// Build 'device' clause.
  ///
  /// \param Modifier Clause modifier.
  /// \param E Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param ModifierLoc Modifier location.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation ModifierLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
        ModifierLoc(ModifierLoc), Device(E) {
    // The helper expression is registered as a pre-init statement captured in
    // \p CaptureRegion.
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  /// NOTE(review): the const overload returns a non-const Expr* — this follows
  /// the surrounding AST conventions but callers get mutable access.
  Expr *getDevice() const { return cast<Expr>(Device); }

  /// Gets modifier.
  OpenMPDeviceClauseModifier getModifier() const { return Modifier; }

  /// Gets modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads'
/// clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(llvm::omp::OMPC_threads, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause()
      : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Pair of the expression and the non-contiguous flag associated with the
    /// component.
    llvm::PointerIntPair<Expr *, 1, bool> AssociatedExpressionNonContiguousPr;

    /// Declaration associated with the component. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration,
                               bool IsNonContiguous)
        : AssociatedExpressionNonContiguousPr(AssociatedExpression,
                                              IsNonContiguous),
          // The stored declaration is always canonicalized.
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const {
      return AssociatedExpressionNonContiguousPr.getPointer();
    }

    bool isNonContiguous() const {
      return AssociatedExpressionNonContiguousPr.getInt();
    }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;
  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;
  /// Number of component lists.
unsigned NumComponentLists; /// Total number of expression components. unsigned NumComponents; OMPMappableExprListSizeTy() = default; OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations), NumComponentLists(NumComponentLists), NumComponents(NumComponents) {} }; /// This represents clauses with a list of expressions that are mappable. /// Examples of these clauses are 'map' in /// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from /// in '#pragma omp target update...' directives. template <class T> class OMPMappableExprListClause : public OMPVarListClause<T>, public OMPClauseMappableExprCommon { friend class OMPClauseReader; /// Number of unique declarations in this clause. unsigned NumUniqueDeclarations; /// Number of component lists in this clause. unsigned NumComponentLists; /// Total number of components in this clause. unsigned NumComponents; /// Whether this clause is possible to have user-defined mappers associated. /// It should be true for map, to, and from clauses, and false for /// use_device_ptr and is_device_ptr. const bool SupportsMapper; /// C++ nested name specifier for the associated user-defined mapper. NestedNameSpecifierLoc MapperQualifierLoc; /// The associated user-defined mapper identifier information. DeclarationNameInfo MapperIdInfo; protected: /// Build a clause for \a NumUniqueDeclarations declarations, \a /// NumComponentLists total component lists, and \a NumComponents total /// components. /// /// \param K Kind of the clause. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. /// \param SupportsMapper Indicates whether this clause is possible to have /// user-defined mappers associated. /// \param MapperQualifierLocPtr C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfoPtr The identifier of associated user-defined mapper. OMPMappableExprListClause( OpenMPClauseKind K, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false, NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr, DeclarationNameInfo *MapperIdInfoPtr = nullptr) : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc, Sizes.NumVars), NumUniqueDeclarations(Sizes.NumUniqueDeclarations), NumComponentLists(Sizes.NumComponentLists), NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) { if (MapperQualifierLocPtr) MapperQualifierLoc = *MapperQualifierLocPtr; if (MapperIdInfoPtr) MapperIdInfo = *MapperIdInfoPtr; } /// Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Set the unique declarations that are in the trailing objects of the /// class. 
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. 
void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Get the components that are in the trailing objects of the class. ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. 
assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. 
        // Copy this list's components into the reserved trailing storage
        // (closes the per-list and per-declaration loops opened above).
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }

  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) { MapperIdInfo = MapperId; }

  /// Get the user-defined mapper references that are in the trailing objects of
  /// the class. The mapper refs are stored immediately after the varlist_size()
  /// expressions in the Expr * trailing array.
  MutableArrayRef<Expr *> getUDMapperRefs() {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeMutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Get the user-defined mappers references that are in the trailing objects
  /// of the class (const overload).
  ArrayRef<Expr *> getUDMapperRefs() const {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeArrayRef<Expr *>(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Set the user-defined mappers that are in the trailing objects of the
  /// class. \p DMDs must contain exactly varlist_size() entries.
  void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
    assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
           "Unexpected number of user-defined mappers.");
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
  }

public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }

  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Whether this clause is possible to have user-defined mappers associated.
    const bool SupportsMapper;

    // The user-defined mapper associated with the current declaration.
    ArrayRef<Expr *>::iterator MapperCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          SupportsMapper(SupportsMapper),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      // An end iterator is built from empty arrays, in which case
      // RemainingLists stays zero and MapperCur is left untouched.
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
      if (SupportsMapper)
        MapperCur = Mappers.begin();
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components,
                                         SupportsMapper, Mappers) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;

        if (SupportsMapper)
          ++MapperCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      const ValueDecl *Mapper = nullptr;
      if (SupportsMapper && *MapperCur)
        Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
      return std::make_tuple(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
          Mapper);
    }
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }

      ++ListSizeCur;
      if (SupportsMapper)
        ++MapperCur;

      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator component_lists_end() const {
    // Built from empty decl/list-size arrays so the iterator starts (and
    // stays) at the components' end.
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()),
        SupportsMapper, llvm::None);
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range
  decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    // The caller must supply exactly one modifier (possibly 'unknown') per
    // available modifier slot; copy them and their locations verbatim.
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    // Only 'to' and 'tofrom' map types expose their expressions as used
    // children; other map types yield an empty range.
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  /// NOTE(review): const overload still returns a non-const Expr * — this
  /// matches the sibling clauses in this file (thread_limit, priority).
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPThreadLimitClause() : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// Return ThreadLimit number. Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } const_child_range children() const { return const_child_range(&ThreadLimit, &ThreadLimit + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_thread_limit; } }; /// This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'priority' with /// single expression 'n'. 
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(llvm::omp::OMPC_priority, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  // Defined out of line (unlike the sibling clauses above, which return an
  // empty range inline).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); } child_range children() { return child_range(&Grainsize, &Grainsize + 1); } const_child_range children() const { return const_child_range(&Grainsize, &Grainsize + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_grainsize; } }; /// This represents 'nogroup' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp taskloop nogroup /// \endcode /// In this example directive '#pragma omp taskloop' has 'nogroup' clause. class OMPNogroupClause : public OMPClause { public: /// Build 'nogroup' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {} /// Build an empty clause. OMPNogroupClause() : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nogroup; } }; /// This represents 'num_tasks' clause in the '#pragma omp ...' /// directive. 
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of tasks expression.
  Stmt *NumTasks = nullptr;

  /// Set the num_tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper expression for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the num_tasks expression (may be null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  // Defined out of line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null for an empty clause).
Expr *getHint() const { return cast_or_null<Expr>(Hint); } child_range children() { return child_range(&Hint, &Hint + 1); } const_child_range children() const { return const_child_range(&Hint, &Hint + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_hint; } }; /// This represents 'dist_schedule' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp distribute dist_schedule(static, 3) /// \endcode /// In this example directive '#pragma omp distribute' has 'dist_schedule' /// clause with arguments 'static' and '3'. class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'schedule' clause. OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown; /// Start location of the schedule kind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. 
///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size (const overload).
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
        KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
                  SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'to' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'to' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause.
/// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                       ArrayRef<SourceLocation> TheMotionModifiersLoc,
                       NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                             ArrayRef<SourceLocation> MotionModifiersLoc,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'from' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'from' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                         ArrayRef<SourceLocation> TheMotionModifiersLoc,
                         NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
                                  Sizes, /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
         ArrayRef<SourceLocation> MotionModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause.
/// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Trailing Expr* storage holds three consecutive arrays of varlist_size()
    // each: the variable list, the private copies, and their initializers
    // (see getPrivateCopies()/getInits() below).
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
  }
};

/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
    : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
      private llvm::TrailingObjects<
          OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
                                  const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDeviceAddrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDeviceAddrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
                                               StartLoc, LParenLoc, EndLoc, N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured
  /// by one of the privatization clauses.
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }

  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
  }
};

/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'order' clause.
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPOrderClause() : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPOrderClauseKind getKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_order; } }; /// This represents the 'init' clause in '#pragma omp ...' directives. /// /// \code /// #pragma omp interop init(target:obj) /// \endcode class OMPInitClause final : public OMPVarListClause<OMPInitClause>, private llvm::TrailingObjects<OMPInitClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of interop variable. SourceLocation VarLoc; bool IsTarget = false; bool IsTargetSync = false; void setInteropVar(Expr *E) { varlist_begin()[0] = E; } void setIsTarget(bool V) { IsTarget = V; } void setIsTargetSync(bool V) { IsTargetSync = V; } /// Sets the location of the interop variable. 
void setVarLoc(SourceLocation Loc) { VarLoc = Loc; } /// Build 'init' clause. /// /// \param IsTarget Uses the 'target' interop-type. /// \param IsTargetSync Uses the 'targetsync' interop-type. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. /// \param N Number of expressions. OMPInitClause(bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, StartLoc, LParenLoc, EndLoc, N), VarLoc(VarLoc), IsTarget(IsTarget), IsTargetSync(IsTargetSync) {} /// Build an empty clause. OMPInitClause(unsigned N) : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, SourceLocation(), SourceLocation(), SourceLocation(), N) { } public: /// Creates a fully specified clause. /// /// \param C AST context. /// \param InteropVar The interop variable. /// \param PrefExprs The list of preference expressions. /// \param IsTarget Uses the 'target' interop-type. /// \param IsTargetSync Uses the 'targetsync' interop-type. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. static OMPInitClause *Create(const ASTContext &C, Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Creates an empty clause with \a N expressions. /// /// \param C AST context. /// \param N Number of expression items. static OMPInitClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of the interop variable. SourceLocation getVarLoc() const { return VarLoc; } /// Returns the interop variable. 
Expr *getInteropVar() { return varlist_begin()[0]; } const Expr *getInteropVar() const { return varlist_begin()[0]; } /// Returns true is interop-type 'target' is used. bool getIsTarget() const { return IsTarget; } /// Returns true is interop-type 'targetsync' is used. bool getIsTargetSync() const { return IsTargetSync; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInitClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } using prefs_iterator = MutableArrayRef<Expr *>::iterator; using const_prefs_iterator = ArrayRef<const Expr *>::iterator; using prefs_range = llvm::iterator_range<prefs_iterator>; using const_prefs_range = llvm::iterator_range<const_prefs_iterator>; prefs_range prefs() { return prefs_range(reinterpret_cast<Expr **>(std::next(varlist_begin())), reinterpret_cast<Expr **>(varlist_end())); } const_prefs_range prefs() const { auto Prefs = const_cast<OMPInitClause *>(this)->prefs(); return const_prefs_range(Prefs.begin(), Prefs.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_init; } }; /// This represents the 'use' clause in '#pragma omp ...' directives. /// /// \code /// #pragma omp interop use(obj) /// \endcode class OMPUseClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Location of interop variable. SourceLocation VarLoc; /// The interop variable. Stmt *InteropVar = nullptr; /// Set the interop variable. void setInteropVar(Expr *E) { InteropVar = E; } /// Sets the location of '('. 
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the location of the interop variable.
  void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }

public:
  /// Build 'use' clause with an interop variable expression \a InteropVar.
  ///
  /// \param InteropVar The interop variable.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param VarLoc Location of the interop variable.
  /// \param EndLoc Ending location of the clause.
  OMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
               SourceLocation LParenLoc, SourceLocation VarLoc,
               SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_use, StartLoc, EndLoc), LParenLoc(LParenLoc),
        VarLoc(VarLoc), InteropVar(InteropVar) {}

  /// Build an empty clause.
  OMPUseClause()
      : OMPClause(llvm::omp::OMPC_use, SourceLocation(), SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the location of the interop variable.
  SourceLocation getVarLoc() const { return VarLoc; }

  /// Returns the interop variable.
  Expr *getInteropVar() const { return cast<Expr>(InteropVar); }

  child_range children() { return child_range(&InteropVar, &InteropVar + 1); }

  const_child_range children() const {
    return const_child_range(&InteropVar, &InteropVar + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use;
  }
};

/// This represents 'destroy' clause in the '#pragma omp depobj'
/// directive or the '#pragma omp interop' directive.
///
/// \code
/// #pragma omp depobj(a) destroy
/// #pragma omp interop destroy(obj)
/// \endcode
/// In these examples directive '#pragma omp depobj' and '#pragma omp interop'
/// have a 'destroy' clause. The 'interop' directive includes an object.
class OMPDestroyClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Location of interop variable. SourceLocation VarLoc; /// The interop variable. Stmt *InteropVar = nullptr; /// Set the interop variable. void setInteropVar(Expr *E) { InteropVar = E; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the location of the interop variable. void setVarLoc(SourceLocation Loc) { VarLoc = Loc; } public: /// Build 'destroy' clause with an interop variable expression \a InteropVar. /// /// \param InteropVar The interop variable. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. OMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc), LParenLoc(LParenLoc), VarLoc(VarLoc), InteropVar(InteropVar) {} /// Build 'destroy' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {} /// Build an empty clause. OMPDestroyClause() : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) { } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the location of the interop variable. SourceLocation getVarLoc() const { return VarLoc; } /// Returns the interop variable. 
Expr *getInteropVar() const { return cast_or_null<Expr>(InteropVar); } child_range children() { if (InteropVar) return child_range(&InteropVar, &InteropVar + 1); return child_range(child_iterator(), child_iterator()); } const_child_range children() const { if (InteropVar) return const_child_range(&InteropVar, &InteropVar + 1); return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_destroy; } }; /// This represents 'novariants' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp dispatch novariants(a > 5) /// \endcode /// In this example directive '#pragma omp dispatch' has simple 'novariants' /// clause with condition 'a > 5'. class OMPNovariantsClause final : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'novariants' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNovariantsClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_novariants, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPNovariantsClause() : OMPClause(llvm::omp::OMPC_novariants, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPNovariantsClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_novariants; } }; /// This represents 'nocontext' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp dispatch nocontext(a > 5) /// \endcode /// In this example directive '#pragma omp dispatch' has simple 'nocontext' /// clause with condition 'a > 5'. class OMPNocontextClause final : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'nocontext' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. 
/// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNocontextClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nocontext, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPNocontextClause() : OMPClause(llvm::omp::OMPC_nocontext, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPNocontextClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nocontext; } }; /// This represents 'detach' clause in the '#pragma omp task' directive. /// /// \code /// #pragma omp task detach(evt) /// \endcode /// In this example directive '#pragma omp detach' has simple 'detach' clause /// with the variable 'evt'. class OMPDetachClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression of the 'detach' clause. Stmt *Evt = nullptr; /// Set condition. void setEventHandler(Expr *E) { Evt = E; } /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'detach' clause with event-handler \a Evt. /// /// \param Evt Event handler expression. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc), LParenLoc(LParenLoc), Evt(Evt) {} /// Build an empty clause. OMPDetachClause() : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns event-handler expression. Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); } child_range children() { return child_range(&Evt, &Evt + 1); } const_child_range children() const { return const_child_range(&Evt, &Evt + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_detach; } }; /// This represents clause 'inclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' /// with the variables 'a' and 'b'. class OMPInclusiveClause final : public OMPVarListClause<OMPInclusiveClause>, private llvm::TrailingObjects<OMPInclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInclusiveClause(unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPInclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_inclusive; } }; /// This represents clause 'exclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan exclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'exclusive' /// with the variables 'a' and 'b'. 
class OMPExclusiveClause final : public OMPVarListClause<OMPExclusiveClause>, private llvm::TrailingObjects<OMPExclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPExclusiveClause(unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPExclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPExclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_exclusive; } }; /// This represents clause 'uses_allocators' in the '#pragma omp target'-based /// directives. /// /// \code /// #pragma omp target uses_allocators(default_allocator, my_allocator(traits)) /// \endcode /// In this example directive '#pragma omp target' has clause 'uses_allocators' /// with the allocators 'default_allocator' and user-defined 'my_allocator'. class OMPUsesAllocatorsClause final : public OMPClause, private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *, SourceLocation> { public: /// Data for list of allocators. struct Data { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; private: friend class OMPClauseReader; friend TrailingObjects; enum class ExprOffsets { Allocator, AllocatorTraits, Total, }; enum class ParenLocsOffsets { LParen, RParen, Total, }; /// Location of '('. SourceLocation LParenLoc; /// Total number of allocators in the clause. unsigned NumOfAllocators = 0; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of allocators asssociated with the clause. 
  OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc, unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumOfAllocators(N) {}

  /// Build an empty clause.
  /// \param N Number of allocators associated with the clause.
  ///
  explicit OMPUsesAllocatorsClause(unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
                  SourceLocation()),
        NumOfAllocators(N) {}

  unsigned numTrailingObjects(OverloadToken<Expr *>) const {
    return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the allocators data for the clause.
  void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);

public:
  /// Creates clause with a list of allocators \p Data.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Data List of allocators.
  static OMPUsesAllocatorsClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);

  /// Creates an empty clause with the place for \p N allocators.
  ///
  /// \param C AST context.
  /// \param N The number of allocators.
  static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of allocators associated with the clause.
  unsigned getNumberOfAllocators() const { return NumOfAllocators; }

  /// Returns data for the specified allocator.
  OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const;

  // Iterators
  child_range children() {
    Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
    return child_range(Begin, Begin + NumOfAllocators *
                                          static_cast<int>(ExprOffsets::Total));
  }
  const_child_range children() const {
    Stmt *const *Begin =
        reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
    return const_child_range(
        Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total));
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_uses_allocators;
  }
};

/// This represents clause 'affinity' in the '#pragma omp task'-based
/// directives.
///
/// \code
/// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i])
/// \endcode
/// In this example directive '#pragma omp task' has clause 'affinity' with the
/// affinity modifier 'iterator(i = 0:n)' and locator items '([3][n])a',
/// 'b[:n]' and 'c[i]'.
class OMPAffinityClause final
    : public OMPVarListClause<OMPAffinityClause>,
      private llvm::TrailingObjects<OMPAffinityClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':' symbol.
  SourceLocation ColonLoc;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of locators associated with the clause.
  OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
                                            LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
/// \param N Number of locators asssociated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the affinity modifier for the clause, if any. void setModifier(Expr *E) { getTrailingObjects<Expr *>()[varlist_size()] = E; } /// Sets the location of ':' symbol. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a modifier a list of locator items. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param Locators List of locator items. static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Creates an empty clause with the place for \p N locator items. /// /// \param C AST context. /// \param N The number of locator items. static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets affinity modifier. Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; } Expr *getModifier() const { return getTrailingObjects<Expr *>()[varlist_size()]; } /// Gets the location of ':' symbol. SourceLocation getColonLoc() const { return ColonLoc; } // Iterators child_range children() { int Offset = getModifier() ? 
1 : 0; return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + Offset)); } const_child_range children() const { auto Children = const_cast<OMPAffinityClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_affinity; } }; /// This represents 'filter' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp masked filter(tid) /// \endcode /// In this example directive '#pragma omp masked' has 'filter' clause with /// thread id. class OMPFilterClause final : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Express of the 'filter' clause. Stmt *ThreadID = nullptr; /// Sets the thread identifier. void setThreadID(Expr *TID) { ThreadID = TID; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'filter' clause with thread-id \a ThreadID. /// /// \param ThreadID Thread identifier. /// \param HelperE Helper expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFilterClause(Expr *ThreadID, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_filter, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadID(ThreadID) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. 
OMPFilterClause() : OMPClause(llvm::omp::OMPC_filter, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return thread identifier. Expr *getThreadID() { return cast<Expr>(ThreadID); } /// Return thread identifier. Expr *getThreadID() const { return cast<Expr>(ThreadID); } child_range children() { return child_range(&ThreadID, &ThreadID + 1); } const_child_range children() const { return const_child_range(&ThreadID, &ThreadID + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_filter; } }; /// This represents 'bind' clause in the '#pragma omp ...' directives. /// /// \code /// #pragma omp loop bind(parallel) /// \endcode class OMPBindClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// The binding kind of 'bind' clause. OpenMPBindClauseKind Kind = OMPC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindLoc; /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set the binding kind. void setBindKind(OpenMPBindClauseKind K) { Kind = K; } /// Set the binding kind location. void setBindKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Build 'bind' clause with kind \a K ('teams', 'parallel', or 'thread'). /// /// \param K Binding kind of the clause ('teams', 'parallel' or 'thread'). /// \param KLoc Starting location of the binding kind. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPBindClause(OpenMPBindClauseKind K, SourceLocation KLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(K), KindLoc(KLoc) {} /// Build an empty clause. OMPBindClause() : OMPClause(llvm::omp::OMPC_bind, SourceLocation(), SourceLocation()) {} public: /// Build 'bind' clause with kind \a K ('teams', 'parallel', or 'thread'). /// /// \param C AST context /// \param K Binding kind of the clause ('teams', 'parallel' or 'thread'). /// \param KLoc Starting location of the binding kind. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPBindClause *Create(const ASTContext &C, OpenMPBindClauseKind K, SourceLocation KLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty 'bind' clause. /// /// \param C AST context static OMPBindClause *CreateEmpty(const ASTContext &C); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPBindClauseKind getBindKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getBindKindLoc() const { return KindLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_bind; } }; /// This class implements a simple visitor for OMPClause /// subclasses. 
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) Ptr<CLASS>
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  // One Visit##Class method per clause class, generated from OMP.inc.
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) \
  RetTy Visit##Class(PTR(Class) S) { DISPATCH(Class); }
#include "llvm/Frontend/OpenMP/OMP.inc"

  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) \
  case llvm::omp::Clause::Enum: \
    return Visit##Class(static_cast<PTR(Class)>(S));
#define CLAUSE_NO_CLASS(Enum, Str) \
  case llvm::omp::Clause::Enum: \
    break;
#include "llvm/Frontend/OpenMP/OMP.inc"
    }
    // NOTE(review): for clause kinds without a dedicated class the switch
    // breaks and control falls off the end of this function — presumably
    // unreachable in practice; confirm against the clause-kind enum coverage.
  }

  // Base case, ignore it. :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};

template <typename T>
using const_ptr = std::add_pointer_t<std::add_const_t<T>>;

template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};

template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor
    : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};

class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
  /// Process motion clauses.
  template <typename T> void VisitOMPMotionClause(T *Node);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S);
#include "llvm/Frontend/OpenMP/OMP.inc"
};

struct OMPTraitProperty {
  llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;

  /// The raw string as we parsed it. This is needed for the `isa` trait set
  /// (which accepts anything) and (later) extensions.
  StringRef RawString;
};

struct OMPTraitSelector {
  Expr *ScoreOrCondition = nullptr;
  llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
  llvm::SmallVector<OMPTraitProperty, 1> Properties;
};

struct OMPTraitSet {
  llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
  llvm::SmallVector<OMPTraitSelector, 2> Selectors;
};

/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional score/condition,
/// and an ordered collection of properties.
class OMPTraitInfo {
  /// Private constructor accessible only by ASTContext.
  OMPTraitInfo() {}
  friend class ASTContext;

public:
  /// Reconstruct a (partial) OMPTraitInfo object from a mangled name.
  OMPTraitInfo(StringRef MangledName);

  /// The outermost level of selector sets.
  llvm::SmallVector<OMPTraitSet, 2> Sets;

  /// Returns true if \p Cond holds for any score/condition expression of any
  /// selector in any set.
  bool anyScoreOrCondition(
      llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) {
    return llvm::any_of(Sets, [&](OMPTraitSet &Set) {
      return llvm::any_of(
          Set.Selectors, [&](OMPTraitSelector &Selector) {
            return Cond(Selector.ScoreOrCondition,
                        /* IsScore */ Selector.Kind !=
                            llvm::omp::TraitSelector::user_condition);
          });
    });
  }

  /// Create a variant match info object from this trait info object. While the
  /// former is a flat representation the actual main difference is that the
  /// latter uses clang::Expr to store the score/condition while the former is
  /// independent of clang. Thus, expressions and conditions are evaluated in
  /// this method.
  void getAsVariantMatchInfo(ASTContext &ASTCtx,
                             llvm::omp::VariantMatchInfo &VMI) const;

  /// Return a string representation identifying this context selector.
  std::string getMangledName() const;

  /// Check the extension trait \p TP is active.
  bool isExtensionActive(llvm::omp::TraitProperty TP) {
    for (const OMPTraitSet &Set : Sets) {
      if (Set.Kind != llvm::omp::TraitSet::implementation)
        continue;
      for (const OMPTraitSelector &Selector : Set.Selectors) {
        if (Selector.Kind !=
            llvm::omp::TraitSelector::implementation_extension)
          continue;
        for (const OMPTraitProperty &Property : Selector.Properties) {
          if (Property.Kind == TP)
            return true;
        }
      }
    }
    return false;
  }

  /// Print a human readable representation into \p OS.
  void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);

/// Clang specific specialization of the OMPContext to lookup target features.
struct TargetOMPContext final : public llvm::omp::OMPContext {
  TargetOMPContext(ASTContext &ASTCtx,
                   std::function<void(StringRef)> &&DiagUnknownTrait,
                   const FunctionDecl *CurrentFunctionDecl,
                   ArrayRef<llvm::omp::TraitProperty> ConstructTraits);

  virtual ~TargetOMPContext() = default;

  /// See llvm::omp::OMPContext::matchesISATrait
  bool matchesISATrait(StringRef RawString) const override;

private:
  std::function<bool(StringRef)> FeatureValidityCheck;
  std::function<void(StringRef)> DiagUnknownTrait;
  llvm::StringMap<bool> FeatureMap;
};

/// Contains data for OpenMP directives: clauses, children
/// expressions/statements (helpers for codegen) and associated statement, if
/// any.
class OMPChildren final
    : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> {
  friend TrailingObjects;
  friend class OMPClauseReader;
  friend class OMPExecutableDirective;
  template <typename T> friend class OMPDeclarativeDirective;

  /// Numbers of clauses.
  unsigned NumClauses = 0;
  /// Number of child expressions/stmts.
  unsigned NumChildren = 0;
  /// true if the directive has associated statement.
  bool HasAssociatedStmt = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<OMPClause *>) const {
    return NumClauses;
  }

  OMPChildren() = delete;

  OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt)
      : NumClauses(NumClauses), NumChildren(NumChildren),
        HasAssociatedStmt(HasAssociatedStmt) {}

  static size_t size(unsigned NumClauses, bool HasAssociatedStmt,
                     unsigned NumChildren);

  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses);
  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S,
                             unsigned NumChildren = 0);
  static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses,
                                  bool HasAssociatedStmt = false,
                                  unsigned NumChildren = 0);

public:
  unsigned getNumClauses() const { return NumClauses; }
  unsigned getNumChildren() const { return NumChildren; }
  bool hasAssociatedStmt() const { return HasAssociatedStmt; }

  /// Set associated statement.
  void setAssociatedStmt(Stmt *S) {
    // The associated statement is stored one slot past the child list.
    getTrailingObjects<Stmt *>()[NumChildren] = S;
  }

  void setChildren(ArrayRef<Stmt *> Children);

  /// Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPChildren *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    return getTrailingObjects<Stmt *>()[NumChildren];
  }

  /// Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(),
                                     NumClauses);
  }
  ArrayRef<OMPClause *> getClauses() const {
    return const_cast<OMPChildren *>(this)->getClauses();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *
  getCapturedStmt(OpenMPDirectiveKind RegionKind,
                  ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    assert(llvm::any_of(
               CaptureRegions,
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) {
    assert(hasAssociatedStmt() && "Must have associated captured statement.");
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }

  const CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt(
        CaptureRegions);
  }

  MutableArrayRef<Stmt *> getChildren();
  ArrayRef<Stmt *> getChildren() const {
    return const_cast<OMPChildren *>(this)->getChildren();
  }

  /// Returns the innermost non-captured statement by unwrapping any chain of
  /// CapturedStmt wrappers around the associated statement.
  Stmt *getRawStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) {
      Stmt *S = nullptr;
      do {
        S = CS->getCapturedStmt();
        CS = dyn_cast<CapturedStmt>(S);
      } while (CS);
      return S;
    }
    return getAssociatedStmt();
  }
  const Stmt *getRawStmt() const {
    return const_cast<OMPChildren *>(this)->getRawStmt();
  }

  Stmt::child_range getAssociatedStmtAsRange() {
    if (!HasAssociatedStmt)
      return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator());
    return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren],
                             &getTrailingObjects<Stmt *>()[NumChildren +
                                                           1]);
  }
};

} // namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
logger.h
#ifndef KISS_LOG_H
#define KISS_LOG_H

#include <iostream>

namespace kiss {

/// Severity levels, ordered from most to least severe.
enum eLogLevel {
    LOG_LEVEL_ERROR,
    LOG_LEVEL_WARNING,
    LOG_LEVEL_INFO,
    LOG_LEVEL_DEBUG
};

/**
 * Minimal stream-style logger.
 *
 * A Logger instance is a short-lived proxy constructed by the KISS_LOG_*
 * macros below. The output stream and threshold are process-wide (static)
 * state; each insertion is serialized across OpenMP threads with a named
 * critical section.
 */
class Logger {
    static std::ostream *stream;  // current output stream (defined in the .cpp)
    static eLogLevel level;       // current threshold (defined in the .cpp)

public:
    Logger(eLogLevel level);
    ~Logger();

    static std::ostream &getLogStream();
    static void setLogStream(std::ostream *s);
    static void setLogStream(std::ostream &s);
    static void setLogLevel(eLogLevel level);
    static eLogLevel getLogLevel();
    // Reads the desired level from the environment — see the .cpp for the
    // variable name (not visible from this header).
    static void loadEnvLogLevel();

    operator std::ostream &() { return getLogStream(); }

    /// Insert a value into the log stream.
    /// Fix: take the argument by const reference so temporaries and literals
    /// (e.g. `logger << 42`, `logger << std::string("x")`) can be logged; the
    /// previous `T&` overload rejected rvalues. Lvalue callers are unaffected.
    template<typename T>
    inline Logger& operator<<(const T& data) {
#pragma omp critical (KISS_LOGGER)
        {
            getLogStream() << data;
        }
        return *this;
    }

    /// Overload for stream manipulators such as std::endl / std::flush.
    inline Logger& operator<<(std::ostream& (*func)(std::ostream&)) {
#pragma omp critical (KISS_LOGGER)
        {
            getLogStream() << func;
        }
        return *this;
    }
};

} // namespace kiss

// Usage: KISS_LOG_ERROR << "message" << std::endl;
// The leading `if` makes a filtered-out statement a no-op without
// evaluating the stream insertions' side effects on the Logger.
#define KISS_LOG_ERROR if (kiss::Logger::getLogLevel() < kiss::LOG_LEVEL_ERROR) {} else kiss::Logger(kiss::LOG_LEVEL_ERROR)
#define KISS_LOG_WARNING if (kiss::Logger::getLogLevel() < kiss::LOG_LEVEL_WARNING) {} else kiss::Logger(kiss::LOG_LEVEL_WARNING)
#define KISS_LOG_INFO if (kiss::Logger::getLogLevel() < kiss::LOG_LEVEL_INFO) {} else kiss::Logger(kiss::LOG_LEVEL_INFO)
#define KISS_LOG_DEBUG if (kiss::Logger::getLogLevel() < kiss::LOG_LEVEL_DEBUG) {} else kiss::Logger(kiss::LOG_LEVEL_DEBUG)

#endif /* KISS_LOG_H */
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
triplet_iw.c
/* Copyright (C) 2016 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include "triplet_iw.h" #include <math.h> #include "grgrid.h" #include "phonoc_utils.h" #include "tetrahedron_method.h" #include "triplet.h" static void set_freq_vertices(double freq_vertices[3][24][4], const double *frequencies1, const double *frequencies2, const long vertices[2][24][4], const long num_band1, const long num_band2, const long b1, const long b2, const long tp_type); static long set_g(double g[3], const double f0, const double freq_vertices[3][24][4], const long max_i); static void get_triplet_tetrahedra_vertices( long vertices[2][24][4], const long tp_relative_grid_address[2][24][4][3], const long triplet[3], const ConstBZGrid *bzgrid); static void get_neighboring_grid_points_type1( long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long num_relative_grid_address, const ConstBZGrid *bzgrid); static void get_neighboring_grid_points_type2( long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long num_relative_grid_address, const ConstBZGrid *bzgrid); void tpi_get_integration_weight( double *iw, char *iw_zero, const double *frequency_points, const long num_band0, const long tp_relative_grid_address[2][24][4][3], const long triplets[3], const long num_triplets, const ConstBZGrid *bzgrid, const double *frequencies1, const long num_band1, const double *frequencies2, const long num_band2, const long tp_type, const long openmp_per_bands) { long max_i, j, b1, b2, b12, num_band_prod, adrs_shift; long vertices[2][24][4]; double g[3]; double freq_vertices[3][24][4]; get_triplet_tetrahedra_vertices(vertices, tp_relative_grid_address, triplets, bzgrid); num_band_prod = num_triplets * num_band0 * num_band1 * num_band2; /* tp_type: Type of integration weights stored */ /* */ /* g0 -> \delta(f0 - (-f1 + f2)) */ /* g1 -> \delta(f0 - (f1 - f2)) */ /* g2 -> \delta(f0 - (f1 + f2)) */ /* */ /* tp_type = 2: (g[2], g[0] - g[1]) mainly for ph-ph */ /* tp_type = 3: (g[2], g[0] 
- g[1], g[0] + g[1] + g[2]) mainly for ph-ph */ /* tp_type = 4: (g[0]) mainly for el-ph phonon decay, */ /* f0: ph, f1: el_i, f2: el_f */ if ((tp_type == 2) || (tp_type == 3)) { max_i = 3; } if (tp_type == 4) { max_i = 1; } #ifdef _OPENMP #pragma omp parallel for private(j, b1, b2, adrs_shift, g, \ freq_vertices) if (openmp_per_bands) #endif for (b12 = 0; b12 < num_band1 * num_band2; b12++) { b1 = b12 / num_band2; b2 = b12 % num_band2; set_freq_vertices(freq_vertices, frequencies1, frequencies2, vertices, num_band1, num_band2, b1, b2, tp_type); for (j = 0; j < num_band0; j++) { adrs_shift = j * num_band1 * num_band2 + b1 * num_band2 + b2; iw_zero[adrs_shift] = set_g(g, frequency_points[j], freq_vertices, max_i); if (tp_type == 2) { iw[adrs_shift] = g[2]; adrs_shift += num_band_prod; iw[adrs_shift] = g[0] - g[1]; } if (tp_type == 3) { iw[adrs_shift] = g[2]; adrs_shift += num_band_prod; iw[adrs_shift] = g[0] - g[1]; adrs_shift += num_band_prod; iw[adrs_shift] = g[0] + g[1] + g[2]; } if (tp_type == 4) { iw[adrs_shift] = g[0]; } } } } void tpi_get_integration_weight_with_sigma( double *iw, char *iw_zero, const double sigma, const double cutoff, const double *frequency_points, const long num_band0, const long triplet[3], const long const_adrs_shift, const double *frequencies, const long num_band, const long tp_type, const long openmp_per_bands) { long j, b12, b1, b2, adrs_shift; double f0, f1, f2, g0, g1, g2; #ifdef _OPENMP #pragma omp parallel for private(j, b1, b2, f0, f1, f2, g0, g1, g2, \ adrs_shift) if (openmp_per_bands) #endif for (b12 = 0; b12 < num_band * num_band; b12++) { b1 = b12 / num_band; b2 = b12 % num_band; f1 = frequencies[triplet[1] * num_band + b1]; f2 = frequencies[triplet[2] * num_band + b2]; for (j = 0; j < num_band0; j++) { f0 = frequency_points[j]; adrs_shift = j * num_band * num_band + b1 * num_band + b2; if ((tp_type == 2) || (tp_type == 3)) { if (cutoff > 0 && fabs(f0 + f1 - f2) > cutoff && fabs(f0 - f1 + f2) > cutoff && fabs(f0 - f1 - f2) > 
cutoff) { iw_zero[adrs_shift] = 1; g0 = 0; g1 = 0; g2 = 0; } else { iw_zero[adrs_shift] = 0; g0 = phonoc_gaussian(f0 + f1 - f2, sigma); g1 = phonoc_gaussian(f0 - f1 + f2, sigma); g2 = phonoc_gaussian(f0 - f1 - f2, sigma); } if (tp_type == 2) { iw[adrs_shift] = g2; adrs_shift += const_adrs_shift; iw[adrs_shift] = g0 - g1; } if (tp_type == 3) { iw[adrs_shift] = g2; adrs_shift += const_adrs_shift; iw[adrs_shift] = g0 - g1; adrs_shift += const_adrs_shift; iw[adrs_shift] = g0 + g1 + g2; } } if (tp_type == 4) { if (cutoff > 0 && fabs(f0 + f1 - f2) > cutoff) { iw_zero[adrs_shift] = 1; iw[adrs_shift] = 0; } else { iw_zero[adrs_shift] = 0; iw[adrs_shift] = phonoc_gaussian(f0 + f1 - f2, sigma); } } } } } void tpi_get_neighboring_grid_points(long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long num_relative_grid_address, const ConstBZGrid *bzgrid) { if (bzgrid->type == 1) { get_neighboring_grid_points_type1(neighboring_grid_points, grid_point, relative_grid_address, num_relative_grid_address, bzgrid); } else { get_neighboring_grid_points_type2(neighboring_grid_points, grid_point, relative_grid_address, num_relative_grid_address, bzgrid); } } static void set_freq_vertices(double freq_vertices[3][24][4], const double *frequencies1, const double *frequencies2, const long vertices[2][24][4], const long num_band1, const long num_band2, const long b1, const long b2, const long tp_type) { long i, j; double f1, f2; for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { f1 = frequencies1[vertices[0][i][j] * num_band1 + b1]; f2 = frequencies2[vertices[1][i][j] * num_band2 + b2]; if ((tp_type == 2) || (tp_type == 3)) { if (f1 < 0) { f1 = 0; } if (f2 < 0) { f2 = 0; } freq_vertices[0][i][j] = -f1 + f2; freq_vertices[1][i][j] = f1 - f2; freq_vertices[2][i][j] = f1 + f2; } else { freq_vertices[0][i][j] = -f1 + f2; } } } } /* Integration weight g is calculated. */ /* iw_zero = 1 means g[0] to g[max_i - 1] are all zero. 
*/ /* max_i depends on what we compute, e.g., ph-ph lifetime, */ /* ph-ph collision matrix, and el-ph relaxation time. */ /* iw_zero is definitely determined by in_tetrahedra in case that */ /* f0 is out of the tetrahedra. */ /* iw_zero=1 information can be used to omit to compute particles */ /* interaction strength that is often heaviest part in throughout */ /* calculation. */ static long set_g(double g[3], const double f0, const double freq_vertices[3][24][4], const long max_i) { long i, iw_zero; iw_zero = 1; for (i = 0; i < max_i; i++) { if (thm_in_tetrahedra(f0, freq_vertices[i])) { g[i] = thm_get_integration_weight(f0, freq_vertices[i], 'I'); iw_zero = 0; } else { g[i] = 0; } } return iw_zero; } static void get_triplet_tetrahedra_vertices( long vertices[2][24][4], const long tp_relative_grid_address[2][24][4][3], const long triplet[3], const ConstBZGrid *bzgrid) { long i, j; for (i = 0; i < 2; i++) { for (j = 0; j < 24; j++) { tpi_get_neighboring_grid_points(vertices[i][j], triplet[i + 1], tp_relative_grid_address[i][j], 4, bzgrid); } } } static void get_neighboring_grid_points_type1( long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long num_relative_grid_address, const ConstBZGrid *bzgrid) { long bzmesh[3], bz_address[3]; long i, j, bz_gp, prod_bz_mesh; for (i = 0; i < 3; i++) { bzmesh[i] = bzgrid->D_diag[i] * 2; } prod_bz_mesh = bzmesh[0] * bzmesh[1] * bzmesh[2]; for (i = 0; i < num_relative_grid_address; i++) { for (j = 0; j < 3; j++) { bz_address[j] = bzgrid->addresses[grid_point][j] + relative_grid_address[i][j]; } bz_gp = bzgrid->gp_map[grg_get_grid_index(bz_address, bzmesh)]; if (bz_gp == prod_bz_mesh) { neighboring_grid_points[i] = grg_get_grid_index(bz_address, bzgrid->D_diag); } else { neighboring_grid_points[i] = bz_gp; } } } static void get_neighboring_grid_points_type2( long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long 
num_relative_grid_address, const ConstBZGrid *bzgrid) { long bz_address[3]; long i, j, gp; for (i = 0; i < num_relative_grid_address; i++) { for (j = 0; j < 3; j++) { bz_address[j] = bzgrid->addresses[grid_point][j] + relative_grid_address[i][j]; } gp = grg_get_grid_index(bz_address, bzgrid->D_diag); neighboring_grid_points[i] = bzgrid->gp_map[gp]; if (bzgrid->gp_map[gp + 1] - bzgrid->gp_map[gp] > 1) { for (j = bzgrid->gp_map[gp]; j < bzgrid->gp_map[gp + 1]; j++) { if (bz_address[0] == bzgrid->addresses[j][0] && bz_address[1] == bzgrid->addresses[j][1] && bz_address[2] == bzgrid->addresses[j][2]) { neighboring_grid_points[i] = j; break; } } } } }
atomic.h
/* Atomic operations (v1) * Portable Snippets - https://gitub.com/nemequ/portable-snippets * Created by Evan Nemerson <evan@nemerson.com> * * To the extent possible under law, the authors have waived all * copyright and related or neighboring rights to this code. For * details, see the Creative Commons Zero 1.0 Universal license at * https://creativecommons.org/publicdomain/zero/1.0/ * * This is a small abstraction layer for some common atomic operations * (load, store, add, subtract, and compare & swap) implemented using * various compiler-specific builtins. * * There are four types, 32-bit and 64-bit integers which are both * atomic and non-atomic. The atomic versions should be used for the * atomic variable, the non-atomic variables should be used to store * values read from or written to an atomic variable. For example, a * basic CAS loop: * * void square_dest(psnip_atomic_int64* value) { * psnip_int64_t expected; * do { * expected = psnip_atomic_int64_load(&value); * } while (!psnip_atomic_int64_compare_exchange(&value, &expected, expected * expected)); * } * * Most things are implemented with the preprocessor, but if they were * functions the prototypes (the 64-bit versions, just s/64/32/ for * the 32-bit versions) would loo like: * * psnip_int64_t psnip_atomic_int64_load( * psnip_atomic_int64* object); * void psnip_atomic_int64_store( * psnip_atomic_int64* object, * psnip_int64_t desired); * _Bool psnip_atomic_int64_compare_exchange( * psnip_atomic_int64* object, * psnip_int64_t* expected, * psnip_int64_t desired); * psnip_int64_t psnip_atomic_int64_add( * psnip_atomic_int64* object, * psnip_int64_t operand); * psnip_int64_t psnip_atomic_int64_sub( * psnip_atomic_int64* object, * psnip_int64_t operand); */ #if !defined(PSNIP_ATOMIC_H) #define PSNIP_ATOMIC_H #if !defined(psnip_int64_t) || !defined(psnip_int32_t) # include "exact-int.h" #endif #if !defined(PSNIP_ATOMIC_STATIC_INLINE) # if defined(__GNUC__) # define PSNIP_ATOMIC__COMPILER_ATTRIBUTES 
__attribute__((__unused__)) # else # define PSNIP_ATOMIC__COMPILER_ATTRIBUTES # endif # if defined(HEDLEY_INLINE) # define PSNIP_ATOMIC__INLINE HEDLEY_INLINE # elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L # define PSNIP_ATOMIC__INLINE inline # elif defined(__GNUC_STDC_INLINE__) # define PSNIP_ATOMIC__INLINE __inline__ # elif defined(_MSC_VER) && _MSC_VER >= 1200 # define PSNIP_ATOMIC__INLINE __inline # else # define PSNIP_ATOMIC__INLINE # endif # define PSNIP_ATOMIC__FUNCTION PSNIP_ATOMIC__COMPILER_ATTRIBUTES static PSNIP_ATOMIC__INLINE #endif #if defined(__has_feature) # define PSNIP_ATOMIC_HAS_FEATURE(feature) __has_feature(feature) #else # define PSNIP_ATOMIC_HAS_FEATURE(feature) 0 #endif #define PSNIP_ATOMIC_IMPL_NONE 0 #define PSNIP_ATOMIC_IMPL_GCC 1 #define PSNIP_ATOMIC_IMPL_GCC_SYNC 2 #define PSNIP_ATOMIC_IMPL_CLANG 3 #define PSNIP_ATOMIC_IMPL_MS 4 #define PSNIP_ATOMIC_IMPL_OPENMP 5 #define PSNIP_ATOMIC_IMPL_C11 11 #if defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_GCC #elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) /* GCC 4.7 and 4.8 sets __STDC_VERSION__ to C11 (if compiling in C11 * mode) and didn't have stdatomic.h, but failed to set * __STDC_NO_ATOMICS__. Verions prior to 4.7 didn't set * __STDC_VERSION__ to C11. 
*/ # if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_GCC # else # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_C11 # endif #elif defined(_MSC_VER) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_MS #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_GCC #elif PSNIP_ATOMIC_HAS_FEATURE(c_atomic) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_CLANG #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_GCC_SYNC #elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x5140)) || (defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x5140)) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_GCC #elif defined(_OPENMP) # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_OPENMP #else # define PSNIP_ATOMIC_NOT_FOUND # define PSNIP_ATOMIC_IMPL PSNIP_ATOMIC_IMPL_NONE # warning No atomic implementation found #endif #if !defined(PSNIP_ATOMIC_NOT_FOUND) #if PSNIP_ATOMIC_IMPL == PSNIP_ATOMIC_IMPL_C11 #include <stdatomic.h> typedef atomic_int_fast64_t psnip_atomic_int64; typedef atomic_int_fast32_t psnip_atomic_int32; #define PSNIP_ATOMIC_VAR_INIT(value) ATOMIC_VAR_INIT(value) #define psnip_atomic_int64_load(object) \ atomic_load(object) #define psnip_atomic_int64_store(object, desired) \ atomic_store(object, desired) #define psnip_atomic_int64_compare_exchange(object, expected, desired) \ atomic_compare_exchange_strong(object, expected, desired) #define psnip_atomic_int64_add(object, operand) \ atomic_fetch_add(object, operand) #define psnip_atomic_int64_sub(object, operand) \ atomic_fetch_sub(object, operand) #define psnip_atomic_fence() \ atomic_thread_fence(memory_order_seq_cst) #define PSNIP_ATOMIC_IS_TG #elif PSNIP_ATOMIC_IMPL == PSNIP_ATOMIC_IMPL_CLANG #include <stdint.h> typedef _Atomic psnip_int64_t psnip_atomic_int64; typedef _Atomic psnip_int32_t psnip_atomic_int32; #define 
psnip_atomic_int64_load(object) \ __c11_atomic_load(object, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_store(object, desired) \ __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_compare_exchange(object, expected, desired) \ __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_add(object, operand) \ __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_sub(object, operand) \ __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST) #define psnip_atomic_fence() \ __c11_atomic_thread_fence(__ATOMIC_SEQ_CST) #define PSNIP_ATOMIC_IS_TG #elif PSNIP_ATOMIC_IMPL == PSNIP_ATOMIC_IMPL_GCC #include <stdint.h> #if !defined(__INTEL_COMPILER) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && !defined(_OPENMP) typedef _Atomic psnip_int64_t psnip_atomic_int64; typedef _Atomic psnip_int32_t psnip_atomic_int32; #else typedef psnip_int64_t psnip_atomic_int64; typedef psnip_int32_t psnip_atomic_int32; #endif #define psnip_atomic_int64_load(object) \ __atomic_load_n(object, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_store(object, desired) \ __atomic_store_n(object, desired, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_compare_exchange(object, expected, desired) \ __atomic_compare_exchange_n(object, expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_add(object, operand) \ __atomic_add_fetch(object, operand, __ATOMIC_SEQ_CST) #define psnip_atomic_int64_sub(object, operand) \ __atomic_sub_fetch(object, operand, __ATOMIC_SEQ_CST) #define psnip_atomic_fence() \ __atomic_thread_fence(__ATOMIC_SEQ_CST) #define PSNIP_ATOMIC_IS_TG #elif PSNIP_ATOMIC_IMPL == PSNIP_ATOMIC_IMPL_GCC_SYNC #include <stdint.h> typedef psnip_int64_t psnip_atomic_int64; typedef psnip_int32_t psnip_atomic_int32; PSNIP_ATOMIC__FUNCTION psnip_int64_t psnip_atomic_int64_load(psnip_atomic_int64* object) { __sync_synchronize(); return 
(psnip_int64_t) *object; } PSNIP_ATOMIC__FUNCTION void psnip_atomic_int64_store(psnip_atomic_int64* object, psnip_int64_t desired) { *object = desired; __sync_synchronize(); } #define psnip_atomic_int64_compare_exchange(object, expected, desired) \ __sync_bool_compare_and_swap(object, *(expected), desired) #define psnip_atomic_int64_add(object, operand) \ __sync_fetch_and_add(object, operand) #define psnip_atomic_int64_sub(object, operand) \ __sync_fetch_and_sub(object, operand) PSNIP_ATOMIC__FUNCTION psnip_int32_t psnip_atomic_int32_load(psnip_atomic_int32* object) { __sync_synchronize(); return (psnip_int32_t) *object; } PSNIP_ATOMIC__FUNCTION void psnip_atomic_int32_store(psnip_atomic_int32* object, psnip_int32_t desired) { *object = desired; __sync_synchronize(); } #define psnip_atomic_int32_compare_exchange(object, expected, desired) \ __sync_bool_compare_and_swap(object, *(expected), desired) #define psnip_atomic_int32_add(object, operand) \ __sync_fetch_and_add(object, operand) #define psnip_atomic_int32_sub(object, operand) \ __sync_fetch_and_sub(object, operand) #define psnip_atomic_fence() \ __sync_synchronize() #elif PSNIP_ATOMIC_IMPL == PSNIP_ATOMIC_IMPL_MS #include <Windows.h> typedef long long volatile psnip_atomic_int64; typedef long volatile psnip_atomic_int32; #define psnip_atomic_int32_load(object) \ __pragma(warning(push)) \ __pragma(warning(disable:28112)) \ (*(object)) \ __pragma(warning(pop)) #define psnip_atomic_int32_store(object, desired) \ InterlockedExchange(object, desired) #define psnip_atomic_int32_compare_exchange(object, expected, desired) \ InterlockedCompareExchange(object, desired, *(expected)) #define psnip_atomic_int32_add(object, operand) \ InterlockedExchangeAdd(object, operand) #define psnip_atomic_int32_sub(object, operand) \ InterlockedExchangeAdd(object, -(operand)) #define psnip_atomic_int64_load(object) \ __pragma(warning(push)) \ __pragma(warning(disable:28112)) \ (*(object)) \ __pragma(warning(pop)) #define 
psnip_atomic_int64_store(object, desired) \ InterlockedExchange64(object, desired) #define psnip_atomic_int64_compare_exchange(object, expected, desired) \ InterlockedCompareExchange64(object, desired, *(expected)) #define psnip_atomic_int64_add(object, operand) \ InterlockedExchangeAdd64(object, operand) #define psnip_atomic_int64_sub(object, operand) \ InterlockedExchangeAdd64(object, -(operand)) #define psnip_atomic_fence() \ MemoryBarrier() #elif PSNIP_ATOMIC_IMPL == PSNIP_ATOMIC_IMPL_OPENMP #include <stdint.h> typedef psnip_int64_t psnip_atomic_int64; typedef psnip_int32_t psnip_atomic_int32; PSNIP_ATOMIC__FUNCTION psnip_int64_t psnip_atomic_int64_load(psnip_atomic_int64* object) { psnip_int64_t ret; #pragma omp critical(psnip_atomic) ret = *object; return ret; } PSNIP_ATOMIC__FUNCTION void psnip_atomic_int64_store(psnip_atomic_int64* object, psnip_int64_t desired) { #pragma omp critical(psnip_atomic) *object = desired; } PSNIP_ATOMIC__FUNCTION int psnip_atomic_int64_compare_exchange_(psnip_atomic_int64* object, psnip_int64_t* expected, psnip_int64_t desired) { int ret; #pragma omp critical(psnip_atomic) ret = (*object == *expected) ? 
((*object = desired), 1) : 0; return ret; } #define psnip_atomic_int64_compare_exchange(object, expected, desired) \ psnip_atomic_int64_compare_exchange_(object, expected, desired) PSNIP_ATOMIC__FUNCTION psnip_int64_t psnip_atomic_int64_add(psnip_atomic_int64* object, psnip_int64_t operand) { int ret; #pragma omp critical(psnip_atomic) *object = (ret = *object) + operand; return ret; } PSNIP_ATOMIC__FUNCTION psnip_int64_t psnip_atomic_int64_sub(psnip_atomic_int64* object, psnip_int64_t operand) { int ret; #pragma omp critical(psnip_atomic) *object = (ret = *object) - operand; return ret; } PSNIP_ATOMIC__FUNCTION psnip_int32_t psnip_atomic_int32_load(psnip_atomic_int32* object) { psnip_int32_t ret; #pragma omp critical(psnip_atomic) ret = *object; return ret; } PSNIP_ATOMIC__FUNCTION void psnip_atomic_int32_store(psnip_atomic_int32* object, psnip_int32_t desired) { #pragma omp critical(psnip_atomic) *object = desired; } PSNIP_ATOMIC__FUNCTION int psnip_atomic_int32_compare_exchange_(psnip_atomic_int32* object, psnip_int32_t* expected, psnip_int32_t desired) { int ret = 1; #pragma omp critical(psnip_atomic) ret = (*object == *expected) ? ((*object = desired), 1) : 0; return ret; } #define psnip_atomic_int32_compare_exchange(object, expected, desired) \ psnip_atomic_int32_compare_exchange_(object, expected, desired) PSNIP_ATOMIC__FUNCTION psnip_int32_t psnip_atomic_int32_add(psnip_atomic_int32* object, psnip_int32_t operand) { int ret; #pragma omp critical(psnip_atomic) *object = (ret = *object) + operand; return ret; } PSNIP_ATOMIC__FUNCTION psnip_int32_t psnip_atomic_int32_sub(psnip_atomic_int32* object, psnip_int32_t operand) { int ret; #pragma omp critical(psnip_atomic) *object = (ret = *object) - operand; return ret; } PSNIP_ATOMIC__FUNCTION void psnip_atomic_fence() { #pragma omp critical(psnip_atomic) { } } #endif #if !defined(PSNIP_ATOMIC_VAR_INIT) # define PSNIP_ATOMIC_VAR_INIT(value) (value) #endif /* Most compilers have type-generic atomic implementations. 
*/ #if defined(PSNIP_ATOMIC_IS_TG) #define psnip_atomic_int32_load(object) \ psnip_atomic_int64_load(object) #define psnip_atomic_int32_store(object, desired) \ psnip_atomic_int64_store(object, desired) #define psnip_atomic_int32_compare_exchange(object, expected, desired) \ psnip_atomic_int64_compare_exchange(object, expected, desired) #define psnip_atomic_int32_add(object, operand) \ psnip_atomic_int64_add(object, operand) #define psnip_atomic_int32_sub(object, operand) \ psnip_atomic_int64_sub(object, operand) #endif /* defined(PSNIP_ATOMIC_IS_TG) */ #endif /* !defined(PSNIP_ATOMIC_NOT_FOUND) */ #endif /* defined(PSNIP_ATOMIC_H) */
openmp_common.c
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s #pragma omp // expected-error {{expected an OpenMP directive}} #pragma omp unknown_directive // expected-error {{expected an OpenMP directive}} void foo() { #pragma omp // expected-error {{expected an OpenMP directive}} #pragma omp unknown_directive // expected-error {{expected an OpenMP directive}} }
mixed_tentusscher_myo_epi_2004_S2_14.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
//
// Mixed single-cell model: `extra_data` carries a per-cell mask where 0
// selects the myocardium variant and any other value the epicardium
// variant.  Each variant has 17 state variables (NEQ).
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_14.h"

// Expose the model's resting potential and equation count to the framework.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;

}

// Initialize the state vector of one cell from precomputed steady-state
// values, choosing the myocardium or epicardium set via the mask.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    static bool first_call = true;

    if(first_call) {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0) {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;   //M
        sv[2] = 0.75;  //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f;   //Xr1
        sv[5] = 1.f;   //Xr2
        sv[6] = 0.f;   //Xs
        sv[7] = 1.f;   //S
        sv[8] = 0.f;   //R
        sv[9] = 0.f;   //D
        sv[10] = 1.f;  //F
        sv[11] = 1.f;  //FCa
        sv[12] = 1.f;  //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f; //CaSR
        sv[15] = 11.6f; //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;   //M
        sv[2] = 0.75;  //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f;   //Xr1
        sv[5] = 1.f;   //Xr2
        sv[6] = 0.f;   //Xs
        sv[7] = 1.f;   //S
        sv[8] = 0.f;   //R
        sv[9] = 0.f;   //D
        sv[10] = 1.f;  //F
        sv[11] = 1.f;  //FCa
        sv[12] = 1.f;  //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f; //CaSR
        sv[15] = 11.6f; //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5520015958853,0.00129455548553585,0.779309085270448,0.779120507104534,0.000175257612314416,0.484920925268272,0.00294370895749353,0.999998342569330,1.93874711998085e-08,1.89517017546677e-05,0.999771487389649,1.00728300939714,0.999997507479585,4.07467073122400e-05,1.09809284664416,9.20129773009743,140.104443387177};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }

}

// Advance every requested cell by num_steps explicit steps of size dt,
// dispatching each cell to its variant according to the mask.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j) {
            // NOTE(review): cell type is selected with mapping[i] here but with
            // mapping[sv_id] in the initial-condition setter; when cells_to_solve
            // remaps indices these can disagree -- confirm which is intended.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a myocardium cell: RHS_cpu_myo returns the already
// updated state (not derivatives), which is copied back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium right-hand side.  Despite the name, rDY_
// receives the state AFTER one step of size dt: the voltage uses forward
// Euler and the gates use Rush-Larsen-style exponential updates.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Working variables: membrane currents, fluxes and gate quantities.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen style exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates only relax toward steady state when the membrane is
    // depolarized above -37 mV (rectification of the update).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;

}

// One time step for an epicardium cell; mirrors solve_model_ode_cpu_myo.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium right-hand side.  Same structure as
// RHS_cpu_myo, but several conductances and the SR release/leak terms are
// overwritten from a fitted `parameters` table below.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set (scenario S2); overrides the defaults above.
    real parameters []={13.8459241921870,0.000196064566156457,0.000146553942706429,0.000703301155773898,0.260161768868762,0.181317938505467,0.117372181971868,3.63855954741155,0.0150905585683178,2.46920388142297,1094.85218851589,0.000479578430398116,0.318788483309312,0.0161958786068848,0.00436114068069140,1.30104522530463e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: membrane currents, fluxes and gate quantities.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release/leak use the fitted arel/crel/Vleak instead of constants.
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen style exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates only relax toward steady state when the membrane is
    // depolarized above -37 mV (rectification of the update).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;

}
critical.c
/* critical Directive Example */
#include <omp.h>

/*
 * Demonstrates the OpenMP critical directive: every thread in the parallel
 * region increments the shared counter x inside a critical section, so the
 * read-modify-write is serialized and does not race.
 *
 * Fixes: the original used an implicit-int return type for main (invalid
 * since C99) and did not return a value.
 */
int main(int argc, char *argv[])
{
    int x;

    (void)argc;   /* unused in this example */
    (void)argv;

    x = 0;

    #pragma omp parallel shared(x)
    {
        /* only one thread at a time may execute the update */
        #pragma omp critical
        x = x + 1;
    } /* end of parallel region */

    return 0;
}
ContinuousScatterPlot.h
/// \ingroup base
/// \class ttk::ContinuousScatterPlot
/// \author Guillaume Favelier <guillaume.favelier@lip6.fr>
/// \date March 2016
///
/// \brief TTK processing package that computes the continuous scatterplot of
/// bivariate volumetric data.
///
/// \b Related \b publication \n
/// "Continuous Scatterplots" \n
/// Sven Bachthaler, Daniel Weiskopf \n
/// Proc. of IEEE VIS 2008.\n
/// IEEE Transactions on Visualization and Computer Graphics, 2008.
///
/// \sa ttkContinuousScatterPlot.cpp %for a usage example.

#ifndef _CONTINUOUSSCATTERPLOT_H
#define _CONTINUOUSSCATTERPLOT_H

// base code includes
#include<Wrapper.h>
#include<Triangulation.h>
#include<Geometry.h>

namespace ttk{

  class ContinuousScatterPlot : public Debug{

    public:

      ContinuousScatterPlot();

      ~ContinuousScatterPlot();

      // Main entry point: rasterizes each tetrahedron of the input
      // triangulation into the (scalar1, scalar2) density image.
      template<typename dataType1, typename dataType2>
        int execute() const;

      inline int setVertexNumber(const SimplexId& vertexNumber){
        vertexNumber_=vertexNumber;
        return 0;
      }

      // Enable the dummy-value filter: tetrahedra touching a vertex whose
      // scalar equals dummyValue are skipped during rasterization.
      // NOTE(review): passing withDummyValue==false never resets
      // withDummyValue_ back to false — confirm this is intended.
      inline int setDummyValue(bool withDummyValue, double dummyValue){
        if(withDummyValue){
          withDummyValue_=true;
          dummyValue_=dummyValue;
        }
        return 0;
      }

      inline int setTriangulation(Triangulation* triangulation){
        triangulation_=triangulation;
        return 0;
      }

      // Output image resolution (X = scalar field 1 axis, Y = scalar field 2).
      inline int setResolutions(const SimplexId& resolutionX,
          const SimplexId& resolutionY){
        resolutions_[0]=resolutionX;
        resolutions_[1]=resolutionY;
        return 0;
      }

      inline int setInputScalarField1(void* data){
        inputScalarField1_=data;
        return 0;
      }

      inline int setInputScalarField2(void* data){
        inputScalarField2_=data;
        return 0;
      }

      // scalarMin/scalarMax: two-entry arrays with the range of each field.
      inline int setScalarMin(double* scalarMin){
        scalarMin_=scalarMin;
        return 0;
      }

      inline int setScalarMax(double* scalarMax){
        scalarMax_=scalarMax;
        return 0;
      }

      // Output buffers, expected to be pre-sized to resolutions_ by the caller
      // (execute() indexes them without resizing).
      inline int setOutputDensity(std::vector<std::vector<double>>* density){
        density_=density;
        return 0;
      }

      inline int setOutputMask(std::vector<std::vector<char>>* mask){
        validPointMask_=mask;
        return 0;
      }

    protected:

      SimplexId vertexNumber_;
      Triangulation* triangulation_;
      bool withDummyValue_;
      double dummyValue_;
      SimplexId resolutions_[2];
      void* inputScalarField1_;
      void* inputScalarField2_;
      double* scalarMin_;
      double* scalarMax_;
      std::vector<std::vector<double>>* density_;
      std::vector<std::vector<char>>* validPointMask_;
  };
}

// Computes the continuous scatterplot: each tetrahedron is mapped into the
// 2D range (scalar1, scalar2), decomposed into triangles, and splatted into
// the density image with a ray/triangle intersection test per covered pixel.
template<typename dataType1, typename dataType2>
int ttk::ContinuousScatterPlot::execute() const{

#ifndef TTK_ENABLE_KAMIKAZE
  // input sanity checks (compiled out in "kamikaze" builds)
  if(!inputScalarField1_)
    return -1;
  if(!inputScalarField2_)
    return -2;
  if(!triangulation_)
    return -3;
  if(!density_)
    return -4;
  if(triangulation_->getNumberOfCells()<=0){
    std::cerr << "[ContinuousScatterPlot] Error : no cells." << std::endl;
    return -5;
  }
  // the algorithm only handles tetrahedral meshes (4 vertices per cell)
  if(triangulation_->getCellVertexNumber(0) != 4){
    std::cerr << "[ContinuousScatterPlot] Error : no tetrahedra." << std::endl;
    return -6;
  }
#endif

  dataType1* scalars1=static_cast<dataType1*>(inputScalarField1_);
  dataType2* scalars2=static_cast<dataType2*>(inputScalarField2_);

  Timer t;

  // helpers:
  const SimplexId numberOfCells=triangulation_->getNumberOfCells();
  // rendering helpers:
  // constant ray direction (ortho)
  const double d[3]{0,0,-1};
  // size of one pixel of the output image in scalar units
  const double delta[2]{scalarMax_[0]-scalarMin_[0],scalarMax_[1]-scalarMin_[1]};
  const double sampling[2]{delta[0]/resolutions_[0],delta[1]/resolutions_[1]};
  const double epsilon{0.000001};

#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
  for(SimplexId cell=0; cell<numberOfCells; ++cell){
    bool isDummy{};
    // get tetrahedron info
    SimplexId vertex[4];
    double data[4][3];
    float position[4][3];
    double localScalarMin[2]{};
    double localScalarMax[2]{};
    // for each triangle
    for(int k=0; k<4; ++k){
      // get indices
      triangulation_->getCellVertex(cell,k,vertex[k]);
      // get scalars: data[k] is the vertex image in the 2D range (z==0)
      data[k][0]=scalars1[vertex[k]];
      data[k][1]=scalars2[vertex[k]];
      data[k][2]=0;
      if(withDummyValue_ and (data[k][0]==dummyValue_ or data[k][1]==dummyValue_)){
        isDummy=true;
        break;
      }
      // get local stats: bounding box of the tetrahedron in range space
      if(!k or localScalarMin[0]>data[k][0]) localScalarMin[0]=data[k][0];
      if(!k or localScalarMin[1]>data[k][1]) localScalarMin[1]=data[k][1];
      if(!k or localScalarMax[0]<data[k][0]) localScalarMax[0]=data[k][0];
      if(!k or localScalarMax[1]<data[k][1]) localScalarMax[1]=data[k][1];
      // get positions
      triangulation_->getVertexPoint(vertex[k],position[k][0],position[k][1],position[k][2]);
    }
    if(isDummy) continue;

    // gradient: g0/g1 are the spatial gradients of the two scalar fields
    // over the tetrahedron (piecewise-linear interpolation)
    double g0[3];
    double g1[3];
    {
      double v12[3];
      double v13[3];
      double v14[3];
      double s12[3];
      double s13[3];
      double s14[3];
      for(int k=0; k<3; ++k){
        v12[k]=position[1][k]-position[0][k];
        v13[k]=position[2][k]-position[0][k];
        v14[k]=position[3][k]-position[0][k];
        s12[k]=data[1][k]-data[0][k];
        s13[k]=data[2][k]-data[0][k];
        s14[k]=data[3][k]-data[0][k];
      }
      double a[3];
      double b[3];
      double c[3];
      Geometry::crossProduct(v13,v12,a);
      Geometry::crossProduct(v12,v14,b);
      Geometry::crossProduct(v14,v13,c);
      double det=Geometry::dotProduct(v14,a);
      if(det == 0.){
        // degenerate (flat) tetrahedron: gradients undefined, use 0
        for(int k=0; k<3; ++k){
          g0[k]=0.0;
          g1[k]=0.0;
        }
      }
      else{
        double invDet=1.0/det;
        for(int k=0; k<3; ++k){
          g0[k]=(s14[0]*a[k] + s13[0]*b[k] + s12[0]*c[k]) * invDet;
          g1[k]=(s14[1]*a[k] + s13[1]*b[k] + s12[1]*c[k]) * invDet;
        }
      }
    }

    // volume: |g0 x g1| == 0 marks a singular projection (isLimit),
    // rendered later with an "infinite" density sentinel
    double volume;
    bool isLimit{};
    {
      double cp[3];
      Geometry::crossProduct(g0,g1,cp);
      volume=Geometry::magnitude(cp);
      if(volume == 0.)
        isLimit=true;
    }

    // classification: is one of the four projected vertices inside the
    // triangle formed by the other three? (class 0) otherwise the projected
    // quad self-intersects (class 1)
    int index[4]{0,1,2,3};
    bool isInTriangle{};
    if(Geometry::isPointInTriangle(data[0],data[1],data[2],data[3]))
      isInTriangle=true;
    else if(Geometry::isPointInTriangle(data[0],data[1],data[3],data[2])){
      isInTriangle=true;
      index[0]=0;
      index[1]=1;
      index[2]=3;
      index[3]=2;
    }
    else if(Geometry::isPointInTriangle(data[0],data[2],data[3],data[1])){
      isInTriangle=true;
      index[0]=0;
      index[1]=2;
      index[2]=3;
      index[3]=1;
    }
    else if(Geometry::isPointInTriangle(data[1],data[2],data[3],data[0])){
      isInTriangle=true;
      index[0]=1;
      index[1]=2;
      index[2]=3;
      index[3]=0;
    }

    // projection:
    double density{};
    std::vector<std::vector<SimplexId>> triangles;
    double imaginaryPosition[3]{};
    std::vector<SimplexId> triangle(3);
    // class 0: interior vertex index[3], fan of 3 triangles around it
    if(isInTriangle){
      // mass density
      double massDensity{};
      {
        double A;
        Geometry::computeTriangleArea(data[index[0]],data[index[1]],data[index[2]],A);
        double invA=1.0/A;
        if(A == 0.){
          invA=0.0;
          isLimit=true;
        }
        // barycentric coordinates of the interior vertex
        double alpha, beta, gamma;
        Geometry::computeTriangleArea(data[index[1]],data[index[2]],data[index[3]],alpha);
        Geometry::computeTriangleArea(data[index[0]],data[index[2]],data[index[3]],beta);
        Geometry::computeTriangleArea(data[index[0]],data[index[1]],data[index[3]],gamma);
        alpha*=invA;
        beta*=invA;
        gamma*=invA;
        double p0[3];
        double p1[3];
        for(int k=0; k<3; ++k){
          p0[k]=position[index[3]][k];
          p1[k]=alpha*position[index[0]][k]+beta*position[index[1]][k]+gamma*position[index[2]][k];
        }
        massDensity=Geometry::distance(p0,p1);
      }
      if(isLimit)
        density=std::numeric_limits<decltype(density)>::max();
      else
        density=massDensity/volume;

      triangle[0]=vertex[index[3]];
      triangle[1]=vertex[index[0]];
      triangle[2]=vertex[index[1]];
      triangles.push_back(triangle);
      triangle[0]=vertex[index[3]];
      triangle[1]=vertex[index[0]];
      triangle[2]=vertex[index[2]];
      triangles.push_back(triangle);
      triangle[0]=vertex[index[3]];
      triangle[1]=vertex[index[1]];
      triangle[2]=vertex[index[2]];
      triangles.push_back(triangle);
    }
    // class 1: the two projected edges cross at a new ("imaginary") point p
    else{
      double massDensity{};
      double p[3]{0,0,0};
      if(Geometry::computeSegmentIntersection(data[0][0],data[0][1],
            data[1][0],data[1][1],
            data[2][0],data[2][1],
            data[3][0],data[3][1],
            p[0],p[1])){
        index[0]=0;
        index[1]=1;
        index[2]=2;
        index[3]=3;
      }
      else if(Geometry::computeSegmentIntersection(data[0][0],data[0][1],
            data[2][0],data[2][1],
            data[1][0],data[1][1],
            data[3][0],data[3][1],
            p[0],p[1])){
        index[0]=0;
        index[1]=2;
        index[2]=1;
        index[3]=3;
      }
      else if(Geometry::computeSegmentIntersection(data[0][0],data[0][1],
            data[3][0],data[3][1],
            data[1][0],data[1][1],
            data[2][0],data[2][1],
            p[0],p[1])){
        index[0]=0;
        index[1]=3;
        index[2]=1;
        index[3]=2;
      }
      // interpolate the 3D position of p along both crossing edges
      double a=Geometry::distance(data[index[0]],p);
      double b=Geometry::distance(data[index[0]],data[index[1]]);
      double r0=a/b;
      a=Geometry::distance(data[index[2]],p);
      b=Geometry::distance(data[index[2]],data[index[3]]);
      double r1=a/b;
      double p0[3];
      double p1[3];
      for(int k=0; k<3; ++k){
        p0[k]=position[index[0]][k]+r0*(position[index[1]][k]-position[index[0]][k]);
        p1[k]=position[index[2]][k]+r1*(position[index[3]][k]-position[index[2]][k]);
      }
      massDensity=Geometry::distance(p0,p1);
      if(isLimit)
        density=std::numeric_limits<decltype(density)>::max();
      else
        density=massDensity/volume;

      imaginaryPosition[0]=p[0];
      imaginaryPosition[1]=p[1];
      imaginaryPosition[2]=0;

      // four triangles projection
      triangle[0]=-1; // new geometry
      triangle[1]=vertex[index[0]];
      triangle[2]=vertex[index[2]];
      triangles.push_back(triangle);
      triangle[1]=vertex[index[2]];
      triangle[2]=vertex[index[1]];
      triangles.push_back(triangle);
      triangle[1]=vertex[index[1]];
      triangle[2]=vertex[index[3]];
      triangles.push_back(triangle);
      triangle[1]=vertex[index[3]];
      triangle[2]=vertex[index[0]];
      triangles.push_back(triangle);
    }

    // rendering:
    // "Fast, Minimum Storage Ray/Triangle Intersection", Tomas Moller & Ben
    // Trumbore
    {
      // pixel bounding box of this tetrahedron in the output image
      const SimplexId minI=floor((localScalarMin[0]-scalarMin_[0])/sampling[0]);
      const SimplexId minJ=floor((localScalarMin[1]-scalarMin_[1])/sampling[1]);
      const SimplexId maxI=ceil((localScalarMax[0]-scalarMin_[0])/sampling[0]);
      const SimplexId maxJ=ceil((localScalarMax[1]-scalarMin_[1])/sampling[1]);
      for(SimplexId i=minI; i<maxI; ++i){
        for(SimplexId j=minJ; j<maxJ; ++j){
          // set ray origin
          const double o[3]{scalarMin_[0]+i*sampling[0],scalarMin_[1]+j*sampling[1],1};
          for(unsigned int k=0; k<triangles.size(); ++k){
            const auto& triangle=triangles[k];
            // get triangle info: apex is either a real vertex (class 0)
            // or the imaginary intersection point (class 1, triangle[0]==-1)
            double p0[3];
            if(isInTriangle){
              p0[0]=scalars1[triangle[0]];
              p0[1]=scalars2[triangle[0]];
            }
            else{
              p0[0]=imaginaryPosition[0];
              p0[1]=imaginaryPosition[1];
            }
            p0[2]=0;
            const double p1[3]{(double)scalars1[triangle[1]],(double)scalars2[triangle[1]],0};
            const double p2[3]{(double)scalars1[triangle[2]],(double)scalars2[triangle[2]],0};
            const double e1[3]{p1[0]-p0[0],p1[1]-p0[1],0};
            const double e2[3]{p2[0]-p0[0],p2[1]-p0[1],0};
            double q[3];
            Geometry::crossProduct(d,e2,q);
            const double a=Geometry::dotProduct(e1,q);
            // ray parallel to the triangle plane (or degenerate triangle)
            if(a>-epsilon and a<epsilon) continue;
            const double f=1.0/a;
            const double s[3]{o[0]-p0[0],o[1]-p0[1],1};
            const double u=f * Geometry::dotProduct(s,q);
            if(u<0.0) continue;
            double r[3];
            Geometry::crossProduct(s,e1,r);
            const double v=f * Geometry::dotProduct(d,r);
            if(v<0.0 or (u+v)>1.0) continue;
            // triangle/ray intersection below
#ifdef TTK_ENABLE_OPENMP
#ifdef _WIN32
#pragma omp atomic
#else
#pragma omp atomic update
#endif
#endif
            (*density_)[i][j]+=(1.0-u-v)*density;
            // NOTE(review): MSVC's OpenMP lacks "atomic write", hence the
            // increment fallback on _WIN32 (mask stays non-zero either way)
#ifdef TTK_ENABLE_OPENMP
#ifdef _WIN32
#pragma omp atomic
            (*validPointMask_)[i][j] += 1;
#else
#pragma omp atomic write
            (*validPointMask_)[i][j]=1;
#endif
#else
            (*validPointMask_)[i][j]=1;
#endif
            break;
          }
        }
      }
    }
  }

  {
    std::stringstream msg;
    msg << "[ContinuousScatterPlot] Data-set (" << numberOfCells
      << " tetrahedra) processed in "
      << t.getElapsedTime() << " s. (" << threadNumber_
      << " thread(s))." << std::endl;
    dMsg(std::cout, msg.str(), timeMsg);
  }

  return 0;
}

#endif // CONTINUOUSSCATTERPLOT_H
dacemath.c
/****************************************************************************** * * * DIFFERENTIAL ALGEBRA CORE ENGINE * * * ******************************************************************************* * * * Copyright 2016 Politecnico di Milano (2014 Dinamica Srl) * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * *******************************************************************************/ /* * dacemath.c * * Created on: November 18, 2016 * Author: Politecnico di Milano */ /** \addtogroup DACE Core * @{ */ // MS C library needs this to trigger it to define math constants #define _USE_MATH_DEFINES #include <math.h> #include <stdlib.h> #include "dace/config.h" #include "dace/dacebase.h" #include "dace/daceaux.h" #include "dacecontrib.h" // define various math constants in case they have not been defined by math.h // these are non-standard C, but most C libraries have them #ifndef M_PI #define M_PI (3.14159265358979323846) #endif #ifndef M_PI_2 #define M_PI_2 (1.57079632679489661923) #endif /******************************************************************************** * Basic DACE arithmetic operations *********************************************************************************/ /*! Perform addition of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. 
inc can be the same as ina or inb. */ void daceAdd(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { if(!daceIsSameObject(ina, inc) && !daceIsSameObject(inb, inc)) { daceWeightedSum(ina, 1.0, inb, 1.0, inc); } else { DACEDA idaadd; daceAllocateDA(&idaadd, 0); daceWeightedSum(ina, 1.0, inb, 1.0, &idaadd); daceCopy(&idaadd, inc); daceFreeDA(&idaadd); } } /*! Perform subtraction of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. */ void daceSubtract(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { if(!daceIsSameObject(ina, inc) && !daceIsSameObject(inb, inc)) { daceWeightedSum(ina, 1.0, inb, -1.0, inc); } else { DACEDA idasub; daceAllocateDA(&idasub, 0); daceWeightedSum(ina, 1.0, inb, -1.0, &idasub); daceCopy(&idasub, inc); daceFreeDA(&idasub); } } /*! Perform multiplication of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. 
 */
void daceMultiply(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    // These should use thread local storage (TLS) for multithread safe implementations
    // see https://en.wikipedia.org/wiki/Thread-local_storage
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    // scratch buffers, reused across calls; re-initialized when the active
    // order/variable setup (nomax/nvmax) changes
    static DACE_THREAD_LOCAL double cc[DACE_STATIC_NMMAX] = {0};
    static DACE_THREAD_LOCAL extended_monomial emb[DACE_STATIC_NMMAX];
    static DACE_THREAD_LOCAL extended_monomial *ipbeg[DACE_STATIC_NOMAX+1];
    static DACE_THREAD_LOCAL extended_monomial *ipend[DACE_STATIC_NOMAX+1];
    static DACE_THREAD_LOCAL unsigned int nomax = 0;
    static DACE_THREAD_LOCAL unsigned int nvmax = 0;

    // make sure static memory is correctly allocated
    if(UNLIKELY(nomax != DACECom.nomax || nvmax != DACECom.nvmax))
    {
        nomax = DACECom.nomax;
        nvmax = DACECom.nvmax;
        // ipbeg[i]: start of the bucket holding monomials of order i
        ipbeg[0] = &emb[0];
        for(unsigned int i = 1; i <= DACECom.nomax; i++)
            ipbeg[i] = emb + daceCountMonomials(i - 1, DACECom.nvmax);
    }
#else
    static DACE_THREAD_LOCAL double *cc = NULL;
    static DACE_THREAD_LOCAL extended_monomial *emb = NULL;
    static DACE_THREAD_LOCAL extended_monomial **ipbeg = NULL;
    static DACE_THREAD_LOCAL extended_monomial **ipend = NULL;
    static DACE_THREAD_LOCAL unsigned int nomax = 0;
    static DACE_THREAD_LOCAL unsigned int nvmax = 0;

    // make sure static memory is correctly allocated
    if(UNLIKELY(nomax != DACECom.nomax || nvmax != DACECom.nvmax))
    {
        nomax = DACECom.nomax;
        nvmax = DACECom.nvmax;
        // drop buffers sized for the previous setup and reallocate
        dacefree(cc);
        dacefree(emb);
        dacefree(ipbeg);
        dacefree(ipend);
        cc = (double*) dacecalloc(DACECom.nmmax, sizeof(double));
        emb = (extended_monomial*) dacecalloc(DACECom.nmmax, sizeof(extended_monomial));
        ipbeg = (extended_monomial**) dacecalloc(DACECom.nomax+1, sizeof(extended_monomial*));
        ipend = (extended_monomial**) dacecalloc(DACECom.nomax+1, sizeof(extended_monomial*));
        // ipbeg[i]: start of the bucket holding monomials of order i
        ipbeg[0] = &emb[0];
        for(unsigned int i = 1; i <= DACECom.nomax; i++)
            ipbeg[i] = emb + daceCountMonomials(i - 1, DACECom.nvmax);
    }
#endif

    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);

    // sort so that ina is the short DA vector
    if(illa>illb)
    {
        unsigned int t1;
        t1 = illb; illb = illa; illa = t1;
        t1 = ilmb; ilmb = ilma; ilma = t1;
        monomial* t2;
        t2 = ipoa; ipoa = ipob; ipob = t2;
    }

    // reset all order buckets to empty
    for(unsigned int i = 0; i <= DACECom_t.nocut; i++)
        ipend[i] = ipbeg[i];

    // sort vector b by order, dropping monomials above the truncation order
    for(monomial *ib = ipob; ib < ipob+illb; ib++)
    {
        const unsigned int noib = DACECom.ieo[ib->ii];
        if(noib > DACECom_t.nocut) continue;
        ipend[noib]->i1 = DACECom.ie1[ib->ii];
        ipend[noib]->i2 = DACECom.ie2[ib->ii];
        ipend[noib]->cc = ib->cc;
        ipend[noib]++;
    }

    // perform actual multiplication: for each monomial of a, visit only the
    // order buckets of b whose combined order stays within nocut
    for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
    {
        const unsigned int i1ia = DACECom.ie1[ia->ii];
        const unsigned int i2ia = DACECom.ie2[ia->ii];
        const double ccia = ia->cc;

        // Note: all of these inner loops can safely be run in parallel
        //#pragma omp parallel for
        for(int noib = DACECom_t.nocut-DACECom.ieo[ia->ii]; noib >= 0; noib--)
        {
            for(extended_monomial *ib = ipbeg[noib]; ib < ipend[noib]; ib++)
            {
                // accumulate the product coefficient at the combined index
                const unsigned int ic = DACECom.ia1[i1ia+ib->i1] + DACECom.ia2[i2ia+ib->i2];
                cc[ic] += ccia*ib->cc;
            }
        }
    }

    // compress the dense coefficient array cc into the output DA
    dacePack(cc, inc);
}

/*! Multiply two DA vectors component-wise, i.e. each monomial of ina with
   the corresponding monomial of inb
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] inb Pointer to the first DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina or inb.
\sa daceEvalMonomials */ void daceMultiplyMonomials(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { monomial *ipoa; unsigned int ilma, illa; monomial *ipob; unsigned int ilmb, illb; monomial *ipoc; unsigned int ilmc, illc; daceVariableInformation(ina, &ipoa, &ilma, &illa); daceVariableInformation(inb, &ipob, &ilmb, &illb); daceVariableInformation(inc, &ipoc, &ilmc, &illc); monomial *ib = ipob, *ic = ipoc; monomial *const ibmax = ipob + ilmb, *const icmax = ipoc + ilmc; for (monomial *i = ipoa; i < ipoa + illa; i++) { while (ib->ii < i->ii && ib < ibmax) ib++; if (ib == ibmax) break; if (ib->ii == i->ii) { if (ic >= icmax) { daceSetError(__func__, DACE_ERROR, 21); break; } ic->cc = i->cc*ib->cc; ic->ii = i->ii; ic++; } } } /*! Perform division of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. */ void daceDivide(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { DACEDA idadiv; daceAllocateDA(&idadiv, 0); daceMultiplicativeInverse(inb, &idadiv); daceMultiply(ina, &idadiv, inc); daceFreeDA(&idadiv); } /*! Square a DA object. \param[in] ina Pointer to the DA object to square \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceSquare(const DACEDA *ina, DACEDA *inb) { daceMultiply(ina, ina, inb); } /*! Add constant to a DA object. \param[in] ina Pointer to the first DA object to operate on \param[in] ckon Constant value to add \param[out] inb Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inb can be the same as ina. */ void daceAddDouble(const DACEDA *ina, const double ckon, DACEDA *inb) { if(!daceIsSameObject(ina, inb)) daceCopy(ina, inb); daceSetCoefficient0(inb, 0, daceGetConstant(inb)+ckon); } /*! 
Subtract DA object from constant.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to subtract from
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
 */
void daceDoubleSubtract(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    // inb = ckon - ina, computed as (-1)*ina then shifting the constant part
    daceMultiplyDouble(ina, -1.0, inb);
    daceSetCoefficient0(inb, 0, daceGetConstant(inb)+ckon);
}

/*! Subtract constant from a DA object.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to subtract
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
 */
void daceSubtractDouble(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    daceAddDouble(ina, -ckon, inb);
}

/*! Multiply constant and DA object.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to multiply by
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
 */
void daceMultiplyDouble(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);

    monomial *ib = ipob;
    if(illa <= ilmb)
    {
        // output large enough for all of a's monomials: no bounds check needed
        for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
        {
            // drop monomials above the truncation order
            if(DACECom.ieo[ia->ii] > DACECom_t.nocut) continue;
            const double c = ia->cc*ckon;
            // drop coefficients below the cutoff threshold
            if(fabs(c) < DACECom_t.eps) continue;
            ib->cc = c;
            ib->ii = ia->ii;
            ib++;
        }
    }
    else
    {
        // output may be too short: check the bound on every write
        monomial *const ibmax = ipob+ilmb;
        for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
        {
            if(DACECom.ieo[ia->ii] > DACECom_t.nocut) continue;
            const double c = ia->cc*ckon;
            if(fabs(c) < DACECom_t.eps) continue;
            if(ib >= ibmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ib->cc = c;
            ib->ii = ia->ii;
            ib++;
        }
    }
    daceSetLength(inb, ib-ipob);
}

/*!
Divide DA object by a constant. \param[in] ina Pointer to the first DA object to operate on \param[in] ckon Constant value to divide by \param[out] inb Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inb can be the same as ina. */ void daceDivideDouble(const DACEDA *ina, const double ckon, DACEDA *inb) { if(ckon == 0.0) { daceSetError(__func__, DACE_ERROR, 41); daceCreateConstant(inb, 0.0); return; } daceMultiplyDouble(ina, 1.0/ckon, inb); } /*! Divide constant by DA object. \param[in] ina Pointer to the first DA object to operate on \param[in] ckon Constant value to divide \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceDoubleDivide(const DACEDA *ina, const double ckon, DACEDA *inc) { daceMultiplicativeInverse(ina, inc); daceMultiplyDouble(inc, ckon, inc); } /*! Divide a DA vector by a single variable to some power, if possible. \param[in] ina Pointer to the DA object to operate on \param[in] var Number of the independent variable by which to divide \param[in] p Power of independent variable \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void daceDivideByVariable(const DACEDA *ina, const unsigned int var, const unsigned int p, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // invalid independent variable number
    if(var < 1 || var > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // treat a few special cases
    if(p == 0)
    {
        // dividing by 1
        daceCopy(ina, inc);
        return;
    }
    else if(illa == 0)
    {
        // dividing 0 by anything
        daceCreateConstant(inc, 0.0);
        return;
    }
    else if(p > DACECom.nomax)
    {
        // dividing non-zero DA by too high a power
        daceSetError(__func__, DACE_ERROR, 42);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base-(nomax+1); idiv selects the digit of var
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = var-1;
    if(var > DACECom.nv1)
        j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);

    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;
    // the variable lives either in the second (ie2) or first (ie1) exponent
    // word depending on its number; the two branches mirror each other
    if(var > DACECom.nv1)
    {
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;
            if(ipow < p)
            {
                // monomial has lower power in var than p: not divisible
                daceSetError(__func__, DACE_ERROR, 42);
                daceCreateConstant(inc, 0.0);
                return;
            }
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // reduce the exponent of var by p
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2-p*idiv];
            ic->cc = i->cc;
            ic++;
        }
    }
    else
    {
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;
            if(ipow < p)
            {
                // monomial has lower power in var than p: not divisible
                daceSetError(__func__, DACE_ERROR, 42);
                daceCreateConstant(inc, 0.0);
                return;
            }
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // reduce the exponent of var by p
            ic->ii = DACECom.ia1[ic1-p*idiv] + DACECom.ia2[ic2];
            ic->cc = i->cc;
            ic++;
        }
    }

    daceSetLength(inc, ic-ipoc);
}

/*! Derivative of DA object with respect to a given independent variable.
\param[in] idif Number of the independent variable with respect to which the derivative is taken
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceDifferentiate(const unsigned int idif, const DACEDA *ina, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // invalid independent variable number
    if(idif < 1 || idif > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base-(nomax+1); idiv selects the digit of idif
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = idif-1;
    if(idif > DACECom.nv1)
        j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);

    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;
    // the variable lives either in the second (ie2) or first (ie1) exponent
    // word depending on its number; the two branches mirror each other
    if(idif > DACECom.nv1)
    {
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;
            // skip monomials without idif, and those whose derivative would
            // exceed the truncation order
            if(ipow == 0 || DACECom.ieo[i->ii] > DACECom_t.nocut+1) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // d/dx (c * x^n) = c*n * x^(n-1)
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2-idiv];
            ic->cc = i->cc*ipow;
            ic++;
        }
    }
    else
    {
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;
            if(ipow == 0 || DACECom.ieo[i->ii] > DACECom_t.nocut+1) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // d/dx (c * x^n) = c*n * x^(n-1)
            ic->ii = DACECom.ia1[ic1-idiv] + DACECom.ia2[ic2];
            ic->cc = i->cc*ipow;
            ic++;
        }
    }

    daceSetLength(inc, ic-ipoc);
}

/*! Integral of DA object with respect to a given independent variable.
\param[in] iint Number of the independent variable with respect to which the integral is taken
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceIntegrate(const unsigned int iint, const DACEDA *ina, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // invalid independent variable number
    if(iint < 1 || iint > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base-(nomax+1); idiv selects the digit of iint
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = iint-1;
    if(iint > DACECom.nv1)
        j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);

    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;
    // the variable lives either in the second (ie2) or first (ie1) exponent
    // word depending on its number; the two branches mirror each other
    if(iint > DACECom.nv1)
    {
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            // skip monomials whose integral would exceed the truncation order
            if(DACECom.ieo[i->ii] >= DACECom_t.nocut) continue;
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;
            // integral of c * x^n is c/(n+1) * x^(n+1)
            const double ccc = i->cc/(ipow+1);
            if(fabs(ccc) < DACECom_t.eps) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2+idiv];
            ic->cc = ccc;
            ic = ic+1;
        }
    }
    else
    {
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            if(DACECom.ieo[i->ii] >= DACECom_t.nocut) continue;
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;
            // integral of c * x^n is c/(n+1) * x^(n+1)
            const double ccc = i->cc/(ipow+1);
            if(fabs(ccc) < DACECom_t.eps) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1+idiv] + DACECom.ia2[ic2];
            ic->cc = ccc;
            ic = ic+1;
        }
    }

    daceSetLength(inc, ic-ipoc);
}

/********************************************************************************
 *     DACE intrinsic function routines
*********************************************************************************/ /*! Truncate the constant part of a DA object to an integer. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceTruncate(const DACEDA *ina, DACEDA *inc) { daceCopy(ina, inc); daceSetCoefficient0(inc, 0, rint(daceGetConstant(inc))); } /*! Round the constant part of a DA object to an integer. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceRound(const DACEDA *ina, DACEDA *inc) { daceCopy(ina, inc); daceSetCoefficient0(inc, 0, round(daceGetConstant(inc))); } /*! Modulo the constant part of a DA object by p. \param[in] ina Pointer to the DA object to operate on \param[in] p Value with respect to which to compute the modulo \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceModulo(const DACEDA *ina, const double p, DACEDA *inc) { daceCopy(ina, inc); daceSetCoefficient0(inc, 0, fmod(daceGetConstant(inc),p)); } /*! Raise a DA object to the p-th power. \param[in] ina Pointer to the DA object to operate on \param[in] p Power to which to raise the DA object \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void dacePowerDouble(const DACEDA *ina, const double p, DACEDA *inc)
{
    // check simple cases
    if(p == 0.0)
    {
        daceCreateConstant(inc, 1.0);
        return;
    }
    else if(p == (int)p)
    {
        // integer exponents are handled exactly by dacePower
        dacePower(ina, (int)p, inc);
        return;
    }

    // non-integer powers require a strictly positive constant part
    const double a0 = daceGetConstant(ina);
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 43);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    // Taylor coefficients of x^p around a0 (after normalizing by a0):
    // xf[i] = a0^p * p*(p-1)*...*(p-i+1)/i!
    xf[0] = pow(a0, p);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        xf[i] = xf[i-1]/i*(p-(i-1));

    daceDivideDouble(ina, a0, inc);    // more accurate than including a0 in series (uses non-linear part in EvaluateSeries)
    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Raise a DA object to the np-th integer power.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] np Power to which to raise the DA object
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/ void dacePower(const DACEDA *ina, const int np, DACEDA *inc) { DACEDA itemp; // handle some common simple cases directly switch(np) { case 0: daceCreateConstant(inc, 1.0); return; case 1: daceCopy(ina, inc); return; case -1: daceMultiplicativeInverse(ina, inc); return; } // handle all other cases, again with common special cases hard coded switch(abs(np)) { case 2: daceSquare(ina, inc); break; case 3: daceAllocateDA(&itemp, 0); daceSquare(ina, &itemp); daceMultiply(ina, &itemp, inc); daceFreeDA(&itemp); break; case 4: daceAllocateDA(&itemp, 0); daceSquare(ina, &itemp); daceSquare(&itemp, inc); daceFreeDA(&itemp); break; default: daceAllocateDA(&itemp, 0); daceCopy(ina, &itemp); daceCreateConstant(inc, 1.0); unsigned int inp = abs(np); while(inp) { if(inp & 1u) daceMultiply(inc, &itemp, inc); inp >>= 1; if(inp) daceSquare(&itemp, &itemp); } daceFreeDA(&itemp); } if(np < 0) daceMultiplicativeInverse(inc, inc); } /*! Take the np-th root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[in] np Root to take of the DA object \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
*/ void daceRoot(const DACEDA *ina, const int np, DACEDA *inc) { if(np == 0) { daceSetError(__func__, DACE_ERROR, 44); daceCreateConstant(inc, 0.0); return; } const double a0 = daceGetConstant(ina); const unsigned int iodd = abs(np) & 1u; if((iodd == 0) && (a0 <= 0.0)) { daceSetError(__func__, DACE_ERROR, 45); daceCreateConstant(inc, 0.0); return; } else if((iodd == 1) && (a0 == 0.0)) { daceSetError(__func__, DACE_ERROR, 46); daceCreateConstant(inc, 0.0); return; } double cr = 1.0/np; #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif xf[0] = copysign(pow(fabs(a0), cr), a0); for(unsigned int i = 1; i < DACECom_t.nocut+1; i++) { xf[i] = xf[i-1]/i*cr; cr--; } daceDivideDouble(ina, a0, inc); // more accurate than including a0 in series (uses non-linear part in EvaluateSeries) daceEvaluateSeries(inc, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the multiplicative inverse of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceMultiplicativeInverse(const DACEDA *ina, DACEDA *inc) { const double a0 = daceGetConstant(ina); if(a0 == 0.0) { daceSetError(__func__, DACE_ERROR, 41); daceCreateConstant(inc, 0.0); return; } if(DACECom_t.nocut < 5) { // lower orders: compute series directly daceMultiplicativeInverse0(ina, inc, a0); } else { // higher orders: use iteration const unsigned int nocut = DACECom_t.nocut; DACECom_t.nocut = 2; daceMultiplicativeInverse0(ina, inc, a0); DACEDA temp; daceAllocateDA(&temp, 0); for(unsigned int ord = 3; ord <= nocut; ord *= 2) { DACECom_t.nocut = umin(nocut, 2*ord-1); daceMultiply(ina, inc, &temp); daceDoubleSubtract(&temp, 2.0, &temp); daceMultiply(inc, &temp, inc); } daceFreeDA(&temp); } } /*! 
Compute the multiplicative inverse of a DA object using series expansion. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \param[in] a0 Constant part of ina \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceMultiplicativeInverse0(const DACEDA *ina, DACEDA *inc, const double a0) { daceDivideDouble(ina, a0, inc); #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif xf[0] = 1.0/a0; for(unsigned int i = 1; i < DACECom_t.nocut+1; i++) xf[i] = -xf[i-1]; daceEvaluateSeries(inc, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the square root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceSquareRoot(const DACEDA *ina, DACEDA *inc) { daceRoot(ina, 2, inc); } /*! Compute the inverse square root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceInverseSquareRoot(const DACEDA *ina, DACEDA *inc) { daceRoot(ina, -2, inc); } /*! Compute the cubic root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceCubicRoot(const DACEDA *ina, DACEDA *inc) { daceRoot(ina, 3, inc); } /*! Compute the inverse cubic root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
*/ void daceInverseCubicRoot(const DACEDA *ina, DACEDA *inc) { daceRoot(ina, -3, inc); } /*! Compute the hypothenuse of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the second DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. */ void daceHypotenuse(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { DACEDA itemp1, itemp2; daceAllocateDA(&itemp1, 0); daceAllocateDA(&itemp2, 0); daceSquare(ina, &itemp1); daceSquare(inb, &itemp2); daceAdd(&itemp1, &itemp2, inc); daceRoot(inc, 2, inc); daceFreeDA(&itemp2); daceFreeDA(&itemp1); } /*! Compute the exponential of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceExponential(const DACEDA *ina, DACEDA *inc) { #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif xf[0] = exp(daceGetConstant(ina)); for(unsigned int i = 1; i < DACECom_t.nocut+1; i++) xf[i] = xf[i-1]/i; daceEvaluateSeries(ina, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the natural logarithm root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
*/
void daceLogarithm(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // logarithm requires a strictly positive constant part
    if(a0 <= 0)
    {
        daceSetError(__func__, DACE_ERROR, 47);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    // normalize so inc = 1 + u with u the non-constant part, then evaluate
    // log(a0) + log(1+u) = log(a0) + u - u^2/2 + u^3/3 - ...
    daceDivideDouble(ina, a0, inc);
    xf[0] = log(a0);
    // NOTE(review): writing xf[1] assumes DACECom_t.nocut >= 1 (otherwise
    // this overflows the nocut+1 element buffer) — confirm that invariant
    xf[1] = 1.0;
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        // recurrence yields xf[i] = (-1)^(i+1)/i
        xf[i] = -xf[i-1]/i*(i-1);
    }

    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the logarithm with respect to base b of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] b Base of the logarithm to use (b > 0)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogarithmBase(const DACEDA *ina, const double b, DACEDA *inc)
{
    // the base must be positive
    if(b <= 0)
    {
        daceSetError(__func__, DACE_ERROR, 48);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // change of base: log_b(x) = ln(x)/ln(b)
    daceLogarithm(ina, inc);
    daceMultiplyDouble(inc, 1.0/log(b), inc);
}

/*! Compute the decadic logarithm of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogarithm10(const DACEDA *ina, DACEDA *inc)
{
    daceLogarithmBase(ina, 10.0, inc);
}

/*! Compute the binary logarithm of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogarithm2(const DACEDA *ina, DACEDA *inc)
{
    daceLogarithmBase(ina, 2.0, inc);
}

/*! Compute the sine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e.
inc can be the same as ina.
*/
void daceSine(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // Taylor coefficients of sin around a0: derivatives cycle with period 4,
    // captured by the two-step recurrence xf[i] = -xf[i-2]/(i*(i-1)).
    // NOTE(review): writing xf[1] assumes DACECom_t.nocut >= 1 — confirm invariant.
    xf[0] = sin(a0);
    xf[1] = cos(a0);
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = -xf[i-2]/(i*(i-1));
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the cosine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceCosine(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // same recurrence as daceSine, seeded with cos/-sin instead of sin/cos
    xf[0] = cos(a0);
    xf[1] = -sin(a0);
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = -xf[i-2]/(i*(i-1));
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the tangent of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part not an
   odd multiple of pi/2)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceTangent(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // tan is singular where cos of the constant part vanishes
    if(cos(daceGetConstant(ina)) == 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 49);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // tan = sin/cos
    daceAllocateDA(&itemp, 0);
    daceSine(ina, &itemp);
    daceCosine(ina, inc);
    daceDivide(&itemp, inc, inc);
    daceFreeDA(&itemp);
}

/*! Compute the arcsine of a DA object.
   \param[in] ina Pointer to the DA object to operate on (|constant part| < 1)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcSine(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // asin is only real-analytic for |a0| < 1
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // asin(x) = atan(x/sqrt(1-x^2))
    daceAllocateDA(&itemp, 0);
    daceSquare(ina, &itemp);
    daceDoubleSubtract(&itemp, 1.0, &itemp);
    daceSquareRoot(&itemp, &itemp);
    daceDivide(ina, &itemp, inc);
    daceArcTangent(inc, inc);
    daceFreeDA(&itemp);
}

/*! Compute the arccosine of a DA object.
   \param[in] ina Pointer to the DA object to operate on (|constant part| < 1)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcCosine(const DACEDA *ina, DACEDA *inc)
{
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // acos(x) = pi/2 - asin(x)
    daceArcSine(ina, inc);
    daceDoubleSubtract(inc, M_PI_2, inc);
}

/*! Compute the arctangent of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcTangent(const DACEDA *ina, DACEDA *inc)
{
    DACEDA iarg;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    // zero-initialized: only odd entries are written below, even ones must be 0
    double xf[DACE_STATIC_NOMAX+1] = {0};
#else
    // dacecalloc presumably zero-fills (calloc-style) so even entries are 0
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // argument reduction via atan(x) = atan(a0) + atan((x-a0)/(1+a0*x)):
    // iarg = (ina - a0)/(1 + a0*ina) has zero constant part
    daceAllocateDA(&iarg, 0);
    daceMultiplyDouble(ina, a0, &iarg);
    daceAddDouble(&iarg, 1.0, &iarg);
    daceSubtractDouble(ina, a0, inc);
    daceDivide(inc, &iarg, &iarg);

    // Maclaurin series atan(u) = u - u^3/3 + u^5/5 - ... (odd terms only)
    double s = 1.0;
    xf[0] = atan(a0);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i+=2)
    {
        xf[i] = s/i;
        s = -s;
    }

    daceEvaluateSeries(&iarg, xf, inc);
    daceFreeDA(&iarg);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Arctangent of ina/inb with proper sign in [-pi, pi]. This function follows
   the C standard atan2(y,x) function syntax.
   \param[in] ina Pointer to the first DA object to operate on (the y argument)
   \param[in] inb Pointer to the second DA object to operate on (the x argument)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcTangent2(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    const double cx = daceGetConstant(inb);
    const double cy = daceGetConstant(ina);

    if(cx == 0.0 && cy == 0.0)
    {
        // both constant parts zero: result defined as 0
        daceCreateConstant(inc, 0.0);
    }
    else
    {
        if(fabs(cy) > fabs(cx))
        {
            // |y| > |x|: use atan2(y,x) = +/-pi/2 - atan(x/y) so the
            // argument of atan stays in (-1, 1)
            daceDivide(inb, ina, inc);
            daceArcTangent(inc, inc);
            if(cy < 0.0)
            {
                daceDoubleSubtract(inc, -M_PI_2, inc);
            }
            else
            {
                daceDoubleSubtract(inc, M_PI_2, inc);
            }
        }
        else
        {
            // |x| >= |y|: atan(y/x), shifted by +/-pi in the left half plane
            // NOTE(review): for cy == 0, cx < 0 this yields -pi, while C's
            // atan2(+0, x<0) returns +pi — both are in [-pi, pi]; confirm intended
            daceDivide(ina, inb, inc);
            daceArcTangent(inc, inc);
            if(cx < 0.0)
            {
                if(cy > 0.0)
                {
                    daceAddDouble(inc, M_PI, inc);
                }
                else
                {
                    daceAddDouble(inc, -M_PI, inc);
                }
            }
        }
    }
}

/*! Compute the hyperbolic sine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceHyperbolicSine(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // derivatives of sinh alternate sinh/cosh: xf[i] = xf[i-2]/(i*(i-1))
    // NOTE(review): writing xf[1] assumes DACECom_t.nocut >= 1 — confirm invariant.
    xf[0] = sinh(a0);
    xf[1] = cosh(a0);
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = xf[i-2]/(i*(i-1));
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the hyperbolic cosine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/ void daceHyperbolicCosine(const DACEDA *ina, DACEDA *inc) { #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif const double a0 = daceGetConstant(ina); xf[0] = cosh(a0); xf[1] = sinh(a0); for(unsigned int i = 2; i < DACECom_t.nocut+1; i++) { xf[i] = xf[i-2]/(i*(i-1)); } daceEvaluateSeries(ina, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the hyperbolic tangent of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicTangent(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; daceAllocateDA(&itemp, 0); daceHyperbolicSine(ina, &itemp); daceHyperbolicCosine(ina, inc); daceDivide(&itemp, inc, inc); daceFreeDA(&itemp); } /*! Compute the hyperbolic arcsince of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicArcSine(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; daceAllocateDA(&itemp, 0); daceSquare(ina, inc); daceAddDouble(inc, 1.0, &itemp); daceSquareRoot(&itemp, inc); daceAdd(ina, inc, &itemp); daceLogarithm(&itemp, inc); daceFreeDA(&itemp); } /*! Compute the hyperbolic arccosine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
*/
void daceHyperbolicArcCosine(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // acosh is only real-analytic for a0 > 1 (the check also rejects a0 == 1,
    // where the derivative is singular)
    if(daceGetConstant(ina) < 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // acosh(x) = log(x + sqrt(x^2 - 1))
    daceAllocateDA(&itemp, 0);
    daceSquare(ina, inc);
    daceSubtractDouble(inc, 1.0, &itemp);
    daceSquareRoot(&itemp, inc);
    daceAdd(ina, inc, &itemp);
    daceLogarithm(&itemp, inc);
    daceFreeDA(&itemp);
}

/*! Compute the hyperbolic arctangent of a DA object.
   \param[in] ina Pointer to the DA object to operate on (|constant part| < 1)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceHyperbolicArcTangent(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // atanh is only real-analytic for |a0| < 1
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // atanh(x) = 0.5*log((1+x)/(1-x))
    daceAllocateDA(&itemp, 0);
    daceAddDouble(ina, 1.0, &itemp);
    daceDoubleSubtract(ina, 1.0, inc);
    daceDivide(&itemp, inc, inc);
    daceLogarithm(inc, &itemp);
    daceMultiplyDouble(&itemp, 0.5, inc);
    daceFreeDA(&itemp);
}

/*! Compute the error function of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceErrorFunction(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // derivatives of erf involve Hermite polynomials:
    // erf^(i)(x) = 2/sqrt(pi) * (-1)^(i-1) * H_{i-1}(x) * exp(-x^2);
    // xf[i] = erf^(i)(a0)/i! with the (-1)^(i-1)/i! folded into factor
    double factor = 2.0*exp(-a0*a0)/sqrt(M_PI);
    // NOTE(review): writing xf[1] assumes DACECom_t.nocut >= 1 — confirm invariant.
    xf[0] = erf(a0);
    xf[1] = factor;
    double Hi2 = 1.0;       // Hermite polynomial H_{i-2} = H_0
    double Hi1 = 2.0*a0;    // Hermite polynomial H_{i-1} = H_1
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        factor /= -((double)i);
        xf[i] = factor*Hi1;
        const double temp = 2.0*a0*Hi1 - 2.0*(i-1)*Hi2;     // recursion relation: H_i = 2*x*H_{i-1} - 2*(i-1)*H_{i-2}
        Hi2 = Hi1;
        Hi1 = temp;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the complementary error function of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceComplementaryErrorFunction(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // erfc = 1 - erf, so the derivative coefficients are those of erf negated
    double factor = -2.0*exp(-a0*a0)/sqrt(M_PI);
    xf[0] = erfc(a0);
    xf[1] = factor;
    double Hi2 = 1.0;       // Hermite polynomial H_{i-2} = H_0
    double Hi1 = 2.0*a0;    // Hermite polynomial H_{i-1} = H_1
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        factor /= -((double)i);
        xf[i] = factor*Hi1;
        const double temp = 2.0*a0*Hi1 - 2.0*(i-1)*Hi2;     // recursion relation: H_i = 2*x*H_{i-1} - 2*(i-1)*H_{i-2}
        Hi2 = Hi1;
        Hi1 = temp;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/// @cond
// Wrappers for contributed netlib Bessel functions (not for public use)

/*! Compute value of Bessel functions J_n, Y_n for n in [n0, n1].
   \param[in] x function argument (non-negative)
   \param[in] n0 Lowest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] n1 Highest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] type Type of function to evaluate:
    -1: Bessel J function
     1: Bessel Y function
   \param[out] bz Array of size n1-n0+1 containing the values of B_{n0}, B_{n0+1}, ..., B_{n1}
   \return Returns 0 if all values are calculated accurately, -1 if x is too
    large to calculate the result or another error occured, or +1 if some of
    the results are of reduced accuracy.
*/
int BesselWrapper(const double x, const int n0, const int n1, const int type, double *bz)
{
    // netlib routines evaluate orders 0..nb-1; nb covers the largest |order| needed
    long int nb = (abs(n0) > abs(n1) ? abs(n0) : abs(n1))+1, ncalc;
    double xx = x, alpha = 0.0;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
#define DACE_STATIC_MAX_BESSEL_ORDER 100
    if( DACE_STATIC_MAX_BESSEL_ORDER < nb ) return -1;
    double b[DACE_STATIC_MAX_BESSEL_ORDER];
#else
    double* b = (double*) dacecalloc(nb, sizeof(double));
#endif

    // netlib SPECFUN routines (Fortran calling convention: all by reference)
    if(type < 0)
        rjbesl_(&xx, &alpha, &nb, b, &ncalc);
    else
        rybesl_(&xx, &alpha, &nb, b, &ncalc);

    // discombobulate results: map netlib's 0..nb-1 orders onto bz[n0..n1]
    if(ncalc >= 0)
    {
        // ncalc == nb means all values fully accurate (return 0), otherwise
        // some are of reduced accuracy (return 1)
        ncalc = (ncalc == nb ? 0 : 1);
        // sign for negative orders, starting at (-1)^|n0|
        double s = (n0%2 == 0 ? 1.0 : -1.0);
        for(int i = n0; i <= n1; i++)
        {
            if(i >= 0)
                *(bz++) = b[i];
            else
            {
                *(bz++) = s*b[-i];      // for integer orders considered here, (-1)^n J_n = J_{-n}, and (-1)^n Y_n = Y_{-n}
                s *= -1.0;
            }
        }
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(b);
#endif

    return ncalc < 0 ? -1 : ncalc;
}

/*! Compute value of modified Bessel functions I_n, K_n for n in [n0, n1].
   \param[in] x function argument (non-negative)
   \param[in] n0 Lowest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] n1 Highest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] type Type of function to evaluate:
    -2: Bessel I function, scaled (i.e. exp(-x)*I_n(x))
    -1: Bessel I function
     1: Bessel K function
     2: Bessel K function, scaled (i.e.
exp(x)*K_n(x))
   \param[out] bz Array of size n1-n0+1 containing the values of B_{n0}, B_{n0+1}, ..., B_{n1}
   \return Returns 0 if all values are calculated accurately, -1 if x is too
    large to calculate the result or another error occured, or +1 if some of
    the results are of reduced accuracy.
*/
int ModifiedBesselWrapper(const double x, const int n0, const int n1, const int type, double *bz)
{
    // nb covers the largest |order| needed; ize selects scaled (2) or plain (1)
    long int nb = (abs(n0) > abs(n1) ? abs(n0) : abs(n1))+1, ize = abs(type), ncalc;
    double xx = x, alpha = 0.0;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
#define DACE_STATIC_MAX_BESSEL_ORDER 100
    if( DACE_STATIC_MAX_BESSEL_ORDER < nb ) return -1;
    double b[DACE_STATIC_MAX_BESSEL_ORDER];
#else
    double* b = (double*) dacecalloc(nb, sizeof(double));
#endif

    // netlib SPECFUN routines (Fortran calling convention: all by reference)
    if(type < 0)
        ribesl_(&xx, &alpha, &nb, &ize, b, &ncalc);
    else
        rkbesl_(&xx, &alpha, &nb, &ize, b, &ncalc);

    // discombobulate results: map netlib's 0..nb-1 orders onto bz[n0..n1]
    if(ncalc >= 0)
    {
        // ncalc == nb means all values fully accurate (return 0), otherwise
        // some are of reduced accuracy (return 1)
        ncalc = (ncalc == nb ? 0 : 1);
        for(int i = n0; i <= n1; i++)
            *(bz++) = b[abs(i)];        // for integer orders considered here, I_n = I_{-n}, and for all orders K_n = K_{-n}
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(b);
#endif

    return ncalc < 0 ? -1 : ncalc;
}
/// @endcond

/*! Compute the modified Bessel function I_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[in] scaled If true, the scaled Bessel function is computed (i.e. exp(-x)*I_n(x))
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceBesselIFunction(const DACEDA *ina, const int n, const bool scaled, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // NOTE(review): rejects a0 <= 0 although the docstring says ">= 0" — confirm
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of order up to nocut need Bessel values of orders n-nocut..n+nocut
    const int res = ModifiedBesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, scaled ? -2 : -1, bz);
    if(res >= 0)
    {
        if(scaled)
            daceEvaluateScaledModifiedBesselFunction(ina, bz, 1.0, inc);
        else
            daceEvaluateBesselFunction(ina, bz, 1.0, 1.0, inc);
    }
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*! Compute the modified Bessel function K_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[in] scaled If true, the scaled Bessel function is computed (i.e. exp(x)*K_n(x))
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceBesselKFunction(const DACEDA *ina, const int n, const bool scaled, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of order up to nocut need Bessel values of orders n-nocut..n+nocut
    const int res = ModifiedBesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, scaled ? 2 : 1, bz);
    if(res >= 0)
    {
        if(scaled)
            daceEvaluateScaledModifiedBesselFunction(ina, bz, -1.0, inc);
        else
            daceEvaluateBesselFunction(ina, bz, 1.0, -1.0, inc);
    }
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*!
Compute the Bessel function J_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceBesselJFunction(const DACEDA *ina, const int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // NOTE(review): rejects a0 <= 0 although the docstring says ">= 0" — confirm
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of order up to nocut need Bessel values of orders n-nocut..n+nocut
    const int res = BesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, -1, bz);
    if(res >= 0)
        daceEvaluateBesselFunction(ina, bz, -1.0, 1.0, inc);
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*! Compute the Bessel function Y_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceBesselYFunction(const DACEDA *ina, const int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of order up to nocut need Bessel values of orders n-nocut..n+nocut
    const int res = BesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, 1, bz);
    if(res >= 0)
        daceEvaluateBesselFunction(ina, bz, -1.0, 1.0, inc);
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*!
Evaluate a Bessel function with coefficients bz with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] bz C array of 2*nocut+1 elements containing Bessel functions of orders n-nocut, ..., n+nocut
   \param[in] type Either -1.0 for normal Bessel functions, or +1.0 for modified Bessel functions.
   \param[in] ktype Either -1.0 for modified Bessel K function, or +1.0 for all other Bessel functions.
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceEvaluateBesselFunction(const DACEDA *ina, const double bz[], const double type, const double ktype, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
    double binomial[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
    double* binomial = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    // xf[0]: the function value itself at the constant part (center of bz)
    xf[0] = bz[DACECom_t.nocut];
    binomial[0] = 1.0;
    double factor = 1.0;        // accumulates (ktype/2)^i / i!
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        factor *= ktype*0.5/i;
        // calculate binomial coefficients i choose j based on previously calculated i-1 choose j.
        binomial[i] = 1.0;
        for(unsigned int j = i-1; j > 0; j--)
            binomial[j] += binomial[j-1];
        // Calculate n-th derivative of Bessel function C, see http://dlmf.nist.gov/10.6
        // bz contains values of C_{n-o} to C_{n+o} of constant part of ina
        double sign = 1.0, c = 0.0;
        xf[i] = 0.0;
        for(unsigned int j = 0; j <= i; j++)
        {
            // use Kahan summation, since signs oscillate and magnitudes can also vary greatly
            const double y = binomial[j]*sign*bz[DACECom_t.nocut-i+2*j] - c;
            const double t = xf[i] + y;
            c = (t - xf[i]) - y;
            xf[i] = t;
            // in infinite precision the above is equivalent to:
            // xf[i] += binomial[j]*sign*bz[DACECom_t.nocut-i+2*j];
            sign *= type;
        }
        xf[i] *= factor;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(binomial);
    dacefree(xf);
#endif
}

/*! Evaluate a scaled modified Bessel function with coefficients bz with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] bz C array of 2*nocut+1 elements containing modified Bessel functions of orders n-nocut, ..., n+nocut
   \param[in] ktype Either -1.0 for scaled Bessel K function, or +1.0 for scaled Bessel I function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceEvaluateScaledModifiedBesselFunction(const DACEDA *ina, const double bz[], const double ktype, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
    double binomial[2*DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
    double* binomial = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // xf[0]: the function value itself at the constant part (center of bz)
    xf[0] = bz[DACECom_t.nocut];
    binomial[0] = 1.0;
    double factor = 1.0;        // accumulates (ktype/2)^i / i!
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        factor *= ktype*0.5/i;
        // calculate binomial coefficients 2*i-1 choose j based on previously calculated 2*i-2 choose j.
        binomial[2*i-1] = 1.0;
        for(unsigned int j = 2*i-2; j > 0; j--)
            binomial[j] += binomial[j-1];
        // calculate binomial coefficients 2*i choose j based on previously calculated 2*i-1 choose j.
        binomial[2*i] = 1.0;
        for(unsigned int j = 2*i-1; j > 0; j--)
            binomial[j] += binomial[j-1];
        // Calculate n-th derivative of Bessel function C
        // bz contains values of C_{n-o} to C_{n+o} of constant part of ina
        double sign = 1.0, c = 0.0;
        xf[i] = 0.0;
        for(unsigned int j = 0; j <= 2*i; j++)
        {
            // use Kahan summation, since signs oscillate and magnitudes can also vary greatly
            const double y = binomial[j]*sign*bz[DACECom_t.nocut-i+j] - c;
            const double t = xf[i] + y;
            c = (t - xf[i]) - y;
            xf[i] = t;
            // in infinite precision the above is equivalent to:
            // xf[i] += binomial[j]*sign*bz[DACECom_t.nocut-i+j];
            sign *= -1.0;
        }
        xf[i] *= factor;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(binomial);
    dacefree(xf);
#endif
}

/*! Compute the partial Logarithmic Gamma function of a DA object (without constant part).
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[in] a0 Constant part of ina (checked by the caller)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
   \note No argument checking is performed to ensure values are within allowable range.
*/
void daceLogGammaFunction0(const DACEDA *ina, const double a0, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    // (log Gamma)' = psi; (log Gamma)^(i) = psi^(i-1) = (-1)^i (i-1)! zeta(i, x)
    // for i >= 2 (Hurwitz zeta), so xf[i] = (-1)^i zeta(i, a0)/i.
    // psi_/zeta_ are external special-function routines — presumably netlib
    // contributed code; exact calling conventions not visible here.
    // NOTE(review): writing xf[1] assumes DACECom_t.nocut >= 1 — confirm invariant.
    xf[0] = 0.0;
    xf[1] = psi_(&a0);
    double s = 1.0;
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = (s/i)*zeta_(i, a0, NULL);
        s *= -1.0;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the Logarithmic Gamma function of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogGammaFunction(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // Gamma has poles at the non-positive integers
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // non-constant part from the series, constant part patched in afterwards
    // NOTE(review): log(dgamma_(&a0)) is NaN where Gamma(a0) < 0 (some
    // negative non-integer a0) — confirm whether log|Gamma| was intended
    daceLogGammaFunction0(ina, a0, inc);
    daceSetCoefficient0(inc, 0, log(dgamma_(&a0)));
}

/*! Compute the Gamma function of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceGammaFunction(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // Gamma has poles at the non-positive integers
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // Gamma(x) = Gamma(a0) * exp(logGamma(x) - logGamma(a0)); the series in
    // daceLogGammaFunction0 has zero constant part, so exponentiating it and
    // scaling by Gamma(a0) gives the full result
    daceLogGammaFunction0(ina, a0, inc);
    daceExponential(inc, inc);
    daceMultiplyDouble(inc, dgamma_(&a0), inc);
}

/*! Compute the n-th Psi function (i.e. the n+1 derivative of the logarithmic gamma function) of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[in] n Order of the Psi function (n >= 0)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void dacePsiFunction(const DACEDA *ina, const unsigned int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // psi and its derivatives have poles at the non-positive integers
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    if(n == 0)
    {
        // psi^(i)(x) = (-1)^(i+1) i! zeta(i+1, x), so xf[i] = psi^(i)/i!
        // alternates sign starting positive at i = 1
        xf[0] = psi_(&a0);
        double s = 1.0;
        for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        {
            xf[i] = s*zeta_(i+1, a0, NULL);
            s *= -1.0;
        }
    }
    else
    {
        // psi^(n+i)(x)/i! = fac * zeta(n+i+1, x); fac seeded with
        // (-1)^(n+1) n! and updated by the recurrence below
        double fac = (n%2 ? 1.0 : -1.0);
        for(unsigned int i = 2; i <= n; i++)
            fac *= i;
        for(unsigned int i = 0; i < DACECom_t.nocut+1; i++)
        {
            xf[i] = fac*zeta_(n+i+1, a0, NULL);
            fac = -(fac/(i+1))*(n+i+1);
        }
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Evaluate a polynomial with coefficients xf with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] xf C array of nocut+1 elements containing the coefficients of the polynomial
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
   \note NOTE(review): reads xf[nocut-1]; assumes DACECom_t.nocut >= 1 — confirm invariant.
*/
void daceEvaluateSeries(const DACEDA *ina, const double xf[], DACEDA *inc)
{
    DACEDA inon;
    const unsigned int nocut = DACECom_t.nocut;

    // inon: the non-constant part of ina (constant coefficient zeroed)
    daceAllocateDA(&inon, 0);
    daceCopy(ina, &inon);
    daceSetCoefficient0(&inon, 0, 0.0);

    // Horner's scheme, raising DACECom_t.nocut step by step so each
    // intermediate product only carries the orders that can still contribute
    DACECom_t.nocut = 1;
    daceMultiplyDouble(&inon, xf[nocut], inc);
    daceAddDouble(inc, xf[nocut-1], inc);

    // evaluate series
    for(int i = nocut-2; i >= 0; i--)
    {
        DACECom_t.nocut = nocut-i;
        daceMultiply(&inon, inc, inc);
        daceAddDouble(inc, xf[i], inc);
    }

    // restore the caller's truncation order
    DACECom_t.nocut = nocut;
    daceFreeDA(&inon);
}

/*! Compute the weighted sum of two DA objects.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] afac Weighting factor to multiply ina by
   \param[in] inb Pointer to the second DA object to operate on
   \param[in] bfac Weighting factor to multiply inb by
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is NOT aliasing safe! So inc MUST BE DIFFERENT from ina and inb.
 */
void daceWeightedSum(const DACEDA *ina, const double afac, const DACEDA *inb, const double bfac, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // Cursors into the three monomial arrays. ia/ib walk the inputs, ic
    // writes the output; icmax is the output's allocated capacity (ilmc).
    // NOTE(review): correctness relies on monomials being stored sorted by
    // their encoded index ii — presumed invariant of the DA representation.
    monomial *ia = ipoa, *ib = ipob, *ic = ipoc;
    monomial *const iamax = ipoa+illa, *const ibmax = ipob+illb, *const icmax = ipoc+ilmc;

    if(illa > 0 && illb > 0)
    {
        // both polynomials have coefficients, merge until one runs out
        unsigned int ja = ia->ii;
        unsigned int jb = ib->ii;
        while(true)
        {
            if(ja == jb)
            {
                // same monomial present in both inputs: add the two terms
                if(DACECom.ieo[ja] <= DACECom_t.nocut)
                {
                    const double ccc = ia->cc*afac + ib->cc*bfac;
                    // drop terms below the DA cutoff threshold
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            // output object too short: flag error 21 and bail
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ia->ii;
                        ic++;
                    }
                }
                ia++;
                ib++;
                if(ia >= iamax || ib >= ibmax) break;
                ja = ia->ii;
                jb = ib->ii;
            }
            else if(ja < jb)
            {
                // store term a
                if(DACECom.ieo[ja] <= DACECom_t.nocut)
                {
                    const double ccc = ia->cc*afac;
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ia->ii;
                        ic++;
                    }
                }
                ia++;
                if(ia >= iamax) break;
                ja = ia->ii;
            }
            else
            {
                // store term b
                if(DACECom.ieo[jb] <= DACECom_t.nocut)
                {
                    const double ccc = ib->cc*bfac;
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ib->ii;
                        ic++;
                    }
                }
                ib++;
                if(ib >= ibmax) break;
                jb = ib->ii;
            }
        }
    }

    // copy any remaining terms from either ina or inb
    // (at most one of the inputs can still have unconsumed terms here)
    monomial *ismin, *ismax;
    double fac;
    if(ia < iamax)
    {
        ismin = ia;
        ismax = iamax;
        fac = afac;
    }
    else
    {
        ismin = ib;
        ismax = ibmax;
        fac = bfac;
    }
    for(monomial *is = ismin; is < ismax; is++)
    {
        if(DACECom.ieo[is->ii] <= DACECom_t.nocut)
        {
            const double ccc = is->cc*fac;
            if(fabs(ccc) >= DACECom_t.eps)
            {
                if(ic >= icmax)
                {
                    daceSetError(__func__, DACE_ERROR, 21);
                    daceSetLength(inc, ilmc);
                    return;
                }
                ic->cc = ccc;
                ic->ii = is->ii;
                ic++;
            }
        }
    }

    // record how many monomials were actually written to inc
    daceSetLength(inc, ic-ipoc);
}
/** @}*/
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/fx-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. 
*/

/*
  Single-byte stand-ins for multi-character operators; the expression string
  is rewritten so the recursive-descent evaluator only ever sees one-byte
  operator tokens. Values sit above the printable ASCII range.
*/
#define LeftShiftOperator  0xf5U
#define RightShiftOperator  0xf6U
#define LessThanEqualOperator  0xf7U
#define GreaterThanEqualOperator  0xf8U
#define EqualOperator  0xf9U
#define NotEqualOperator  0xfaU
#define LogicalAndOperator  0xfbU
#define LogicalOrOperator  0xfcU
#define ExponentialNotation  0xfdU

/*
  Per-evaluation state for an fx expression: the image list, the (rewritten)
  expression text, caches for per-channel statistics and named colors, and one
  virtual cache view per image in the list.
*/
struct _FxInfo
{
  const Image
    *images;

  char
    *expression;

  FILE
    *file;

  SplayTreeInfo
    *colors,
    *symbols;

  CacheView
    **view;

  RandomInfo
    *random_info;

  ExceptionInfo
    *exception;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireFxInfo() allocates the FxInfo structure.
%
%  The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(Image *image,const char *expression)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=image;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishAlignedMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* one virtual cache view per image in the whole list */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  /*
    Force right-to-left associativity for unary negation: every '-' becomes
    '-1.0*', then the exponent cases ('^-', 'E-', 'e-') are patched back.
    The order of these four substitutions is significant.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple operators.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d d N o i s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AddNoiseImage() adds random noise to the image.
%
%  The format of the AddNoiseImage method is:
%
%      Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
%        ExceptionInfo *exception)
%      Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
%        const NoiseType noise_type,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o noise_type:  The type of noise: Uniform, Gaussian, Multiplicative,
%      Impulse, Laplacian, or Poisson.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  Image
    *noise_image;

  /* convenience wrapper: apply noise to the default channel set */
  noise_image=AddNoiseImageChannel(image,DefaultChannels,noise_type,exception);
  return(noise_image);
}

MagickExport Image *AddNoiseImageChannel(const Image *image,
  const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag  "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  const char
    *option;

  double
    attenuate;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* try the hardware-accelerated (OpenCL) path first */
  noise_image=AccelerateAddNoiseImage(image,channel,noise_type,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&noise_image->exception);
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  attenuate=1.0;
  option=GetImageArtifact(image,"attenuate");
  if (option != (char *) NULL)
    attenuate=StringToDouble(option,(char **) NULL);
  status=MagickTrue;
  progress=0;
  /* one RandomInfo per OpenMP thread so rows can be processed in parallel */
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
#endif
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict noise_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          GetPixelRed(p),noise_type,attenuate)));
      /* gray images get identical noise in all three channels */
      if (IsGrayColorspace(image->colorspace) != MagickFalse)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      else
        {
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelGreen(p),noise_type,attenuate)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelBlue(p),noise_type,attenuate)));
        }
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelOpacity(p),noise_type,attenuate)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(noise_indexes+x,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],GetPixelIndex(
          indexes+x),noise_type,attenuate)));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u e S h i f t I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlueShiftImage() mutes the colors of the image to simulate a scene at
%  nighttime in the moonlight.
%
%  The format of the BlueShiftImage method is:
%
%      Image *BlueShiftImage(const Image *image,const double factor,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o factor: the shift factor.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shift_image->exception);
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel;

    Quantum
      quantum;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* first pass: blend each channel with the pixel's minimum channel */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) < quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) < quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(GetPixelRed(p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(p)+factor*quantum);
      /* second pass: blend the result with the pixel's maximum channel */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) > quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) > quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(q,ClampToQuantum(pixel.red));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h a r c o a l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CharcoalImage() creates a new image that is a copy of an existing one with
%  the edge highlighted.  It allocates the memory necessary for the new Image
%  structure and returns a pointer to the new image.
%
%  The format of the CharcoalImage method is:
%
%      Image *CharcoalImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edges,
    *sketch,
    *working;

  /*
    Validate arguments per the usual MagickCore contract.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Pipeline: clone -> edge detect -> blur -> normalize/negate/grayscale.
    Each intermediate image is released as soon as the next stage owns one.
  */
  working=CloneImage(image,0,0,MagickTrue,exception);
  if (working == (Image *) NULL)
    return((Image *) NULL);
  edges=EdgeImage(working,radius,exception);
  working=DestroyImage(working);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  sketch=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);
  if (sketch == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(sketch);
  (void) NegateImage(sketch,MagickFalse);
  (void) GrayscaleImage(sketch,image->intensity);
  return(sketch);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *opacity,
%        const PixelPacket colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity:  A character string indicating the level of opacity as a
%      percentage.
%
%    o colorize: A color value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"

  CacheView
    *colorize_view,
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    pixel;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  colorize_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) ||
      (IsPixelGray(&colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace);
  if ((colorize_image->matte == MagickFalse) &&
      (colorize.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(colorize_image,OpaqueAlphaChannel);
  /* no opacity given: return the (possibly colorspace-adjusted) clone */
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine RGB values of the pen color.  The opacity string is a geometry
    "r[/g[/b[/a]]]"; a single value applies to all three color channels.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  colorize_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,colorize_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* per-channel weighted blend: (src*(100-pct)+fill*pct)/100 */
      SetPixelRed(q,((GetPixelRed(p)*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0));
      SetPixelGreen(q,((GetPixelGreen(p)*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0));
      SetPixelBlue(q,((GetPixelBlue(p)*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0));
      if (colorize_image->matte == MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      else
        SetPixelOpacity(q,((GetPixelOpacity(p)*(100.0-pixel.opacity)+
          colorize.opacity*pixel.opacity)/100.0));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l
o r M a t r i x I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorMatrixImage() applies color transformation to an image. This method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the ColorMatrixImage method is:
%
%      Image *ColorMatrixImage(const Image *image,
%        const KernelInfo *color_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_matrix:  the color matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Create color matrix: copy the user kernel over the 6x6 identity; entries
    beyond row/column 6 are consumed from values[] but ignored.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&color_image->exception);
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    ColorMatrix image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      pixel;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register IndexPacket
      *restrict color_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      /* row v of the matrix produces output channel v (R,G,B,A,Index) */
      for (v=0; v < (ssize_t) height; v++)
      {
        pixel=ColorMatrix[v][0]*GetPixelRed(p)+ColorMatrix[v][1]*
          GetPixelGreen(p)+ColorMatrix[v][2]*GetPixelBlue(p);
        if (image->matte != MagickFalse)
          pixel+=ColorMatrix[v][3]*(QuantumRange-GetPixelOpacity(p));
        if (image->colorspace == CMYKColorspace)
          pixel+=ColorMatrix[v][4]*GetPixelIndex(indexes+x);
        /* column 6 holds the normalized offset */
        pixel+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
          case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
          case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
          case 3:
          {
            if (image->matte != MagickFalse)
              SetPixelAlpha(q,ClampToQuantum(pixel));
            break;
          }
          case 4:
          {
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(color_indexes+x,ClampToQuantum(pixel));
            break;
          }
        }
      }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /* release members in reverse order of acquisition; always returns NULL */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* one cache view was acquired per image in the list (see AcquireFxInfo) */
  for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const ChannelType channel,const ssize_t x,const ssize_t y,
%        double *alpha,Exceptioninfo *exception)
%      MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,double *alpha,
%        Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y:  the pixel position.
%
%    o alpha:  the result.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } static double FxChannelStatistics(FxInfo *fx_info,const Image *image, ChannelType channel,const char *symbol,ExceptionInfo *exception) { char channel_symbol[MaxTextExtent], key[MaxTextExtent], statistic[MaxTextExtent]; const char *value; register const char *p; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; *channel_symbol='\0'; if (*p == '.') { ssize_t option; (void) CopyMagickString(channel_symbol,p+1,MaxTextExtent); option=ParseCommandOption(MagickChannelOptions,MagickTrue,channel_symbol); if (option >= 0) channel=(ChannelType) option; } (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) return(QuantumScale*StringToDouble(value,(char **) NULL)); (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageChannelDepth(image,channel,exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageChannelRange(image,channel,&minima,&maxima,exception); if (LocaleCompare(channel_symbol,"a") == 0) maxima=QuantumRange-maxima; (void) FormatLocaleString(statistic,MaxTextExtent,"%g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); if (LocaleCompare(channel_symbol,"a") == 0) mean=QuantumRange-mean; (void) FormatLocaleString(statistic,MaxTextExtent,"%g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double 
maxima, minima; (void) GetImageChannelRange(image,channel,&minima,&maxima,exception); if (LocaleCompare(channel_symbol,"a") == 0) minima=QuantumRange-minima; (void) FormatLocaleString(statistic,MaxTextExtent,"%g",minima); } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); (void) FormatLocaleString(statistic,MaxTextExtent,"%g", standard_deviation); } (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t, const ssize_t,const char *,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { char *q, subexpression[MaxTextExtent], symbol[MaxTextExtent]; const char *p, *value; double alpha, beta; 
  Image
    *image;

  MagickPixelPacket
    pixel;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    length;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /* Image selector: s (self), u (first image), v (second image), with an
     optional [index] subexpression choosing another image in the list. */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* Copy the bracketed subexpression (brackets may nest) and
                 evaluate it to get the image index. */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              i=(ssize_t) (alpha+0.5);
              p++;
            }
          if (*p == '.')
            p++;
        }
      /* Pixel selector: p{x,y} is absolute, p[dx,dy] is relative. */
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /* Wrap the image index into [0,length) so negative/overflow indices are
     treated modulo the list length. */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  if (length != 0)
    i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetMagickPixelPacket(image,&pixel);
  (void) InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  /* A remaining token that is not a known attribute may be a color name
     (e.g. "red"); look it up in the color cache or query the color table. */
  if ((strlen(p) > 2) &&
      (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MaxTextExtent];

      (void) CopyMagickString(name,p,MaxTextExtent);
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          MagickPixelPacket
            *color;

          color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
            name);
          if (color != (MagickPixelPacket *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
              {
                (void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
                  CloneMagickPixelPacket(&pixel));
                p+=strlen(name);
              }
        }
    }
  (void) CopyMagickString(symbol,p,MaxTextExtent);
  StripString(symbol);
  /* Bare pixel reference (no trailing symbol): return the value of the
     requested channel directly. */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedChannel: return(QuantumScale*pixel.red);
        case GreenChannel: return(QuantumScale*pixel.green);
        case BlueChannel: return(QuantumScale*pixel.blue);
        case OpacityChannel:
        {
          double
            alpha;

          if (pixel.matte == MagickFalse)
            return(1.0);
          alpha=(double) (QuantumScale*GetPixelAlpha(&pixel));
          return(alpha);
        }
        case IndexChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
        case DefaultChannels:
        {
          return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /* Named symbols, dispatched on the first character. */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((double) (QuantumScale*GetPixelAlpha(&pixel)));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /* channel(r,g,b,...): pick the geometry field that corresponds to
             the channel being evaluated. */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case OpacityChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BlueChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case OpacityChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            case IndexChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            default:
              return(0.0);
          }
        }
      /* 'c' aliases the red channel (cyan in CMY terms). */
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /* image.<statistic>: strip the "image." prefix and delegate. */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"intensity") == 0)
        return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec. 709 weights applied to the (possibly gamma-encoded) RGB. */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminance;

          luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminance);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if
char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}

/*
  FxOperatorPrecedence() scans the expression and returns a pointer to the
  operator at which it should be split (the lowest-precedence operator,
  honoring associativity), or NULL if no splittable operator is found.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) ||
        (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /* Skip over function names and literals that embed operator characters
       (e.g. atan2, E+/E- exponents, j0/j1, #hex) so they are not mistaken
       for operators. */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
        /* NOTE: no break here — falls through into the 'J' case. */
      }
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    /* Only rank operators at brace/bracket nesting level zero. */
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /* Implicit multiplication, e.g. "2u" or ")(": previous token ends
             a value and the next begins one. */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) !=
                 (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) ==
               (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* Only binary +/-: the previous char must end an operand. */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
*/ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression,double *beta, ExceptionInfo *exception) { char *q, subexpression[MaxTextExtent]; double alpha, gamma; register const char *p; *beta=0.0; if (exception->severity != UndefinedException) return(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') return(0.0); *subexpression='\0'; p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta, exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(double) (~(size_t) *beta); return(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow((double) alpha,(double) FxEvaluateSubexpression(fx_info, channel,x,y,++p,beta,exception)); return(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); if (*beta == 0.0) { if (exception->severity == UndefinedException) (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=fabs(floor(((double) *beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(fmod((double) alpha,(double) *beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); return(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); return(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha >= *beta ? 
1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); return(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); return(*beta); } case LogicalAndOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(alpha > 0.0) && (gamma > 0.0) ? 1.0 : 0.0; return(*beta); } case LogicalOrOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(alpha > 0.0) || (gamma > 0.0) ? 1.0 : 0.0; return(*beta); } case '?': { double gamma; (void) CopyMagickString(subexpression,++p,MaxTextExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } if (fabs((double) alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,beta,exception); return(gamma); } case '=': { char numeric[MaxTextExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); (void) FormatLocaleString(numeric,MaxTextExtent,"%g",(double) *beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) 
AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); return(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,beta, exception); return(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { (void) CopyMagickString(subexpression,expression+1,MaxTextExtent); subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta, exception); return(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta, exception); return(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta, exception); return(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta, exception); return((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return(fabs((double) alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return(acosh((double) alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return(acos((double) alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); if (alpha == 0.0) return(1.0); gamma=2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha); return(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return(asinh((double) alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return(asin((double) alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return(atan2((double) alpha,(double) *beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return(atanh((double) alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return(atan((double) alpha)); } if (LocaleCompare(expression,"a") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return(ceil((double) alpha)); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return(cosh((double) alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return(cos((double) alpha)); } if (LocaleCompare(expression,"c") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanChannel: type="cyan"; break; case MagentaChannel: type="magenta"; break; case YellowChannel: type="yellow"; break; case OpacityChannel: type="opacity"; break; case BlackChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedChannel: type="red"; break; case GreenChannel: type="green"; break; case BlueChannel: type="blue"; break; case OpacityChannel: type="opacity"; break; default: type="unknown"; break; } (void) CopyMagickString(subexpression,expression+6,MaxTextExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file, "%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename, (double) x,(double) y,type,subexpression,GetMagickPrecision(), (double) alpha); return(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) return(MagickEpsilon); if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return(exp((double) alpha)); } if (LocaleCompare(expression,"e") == 0) return(2.7182818284590452354); break; } case 'F': case 'f': { if (LocaleNCompare(expression,"floor",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return(floor((double) alpha)); } break; } case 'G': case 'g': { if (LocaleNCompare(expression,"gauss",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); gamma=exp((double) (-alpha*alpha/2.0))/sqrt(2.0*MagickPI); return(gamma); } if (LocaleNCompare(expression,"gcd",3) == 0) { MagickOffsetType gcd; 
          /* gcd(a,b): greatest common divisor of the two rounded operands. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType)
            (*beta+0.5));
          return((double) gcd);
        }
      if (LocaleCompare(expression,"g") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(expression,"h") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleCompare(expression,"hue") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"hypot",5) == 0)
        {
          /* hypot(a,b): Euclidean distance sqrt(a*a+b*b). */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return(hypot((double) alpha,(double) *beta));
        }
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleCompare(expression,"k") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'I':
    case 'i':
    {
      if (LocaleCompare(expression,"intensity") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"int",3) == 0)
        {
          /* int(x): round down to the nearest integer. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(floor(alpha));
        }
#if defined(MAGICKCORE_HAVE_ISNAN)
      if (LocaleNCompare(expression,"isnan",5) == 0)
        {
          /* isnan(x): 1.0 when x is NaN, otherwise 0.0. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((double) !!isnan((double) alpha));
        }
#endif
      if (LocaleCompare(expression,"i") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(expression,"j") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
      if (LocaleNCompare(expression,"j0",2) == 0)
        {
          /* j0(x): Bessel function of the first kind, order 0. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
            exception);
          return(j0((double) alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"j1",2) == 0)
        {
          /* j1(x): Bessel function of the first kind, order 1. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
            exception);
          return(j1((double) alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"jinc",4) == 0)
        {
          /* jinc(x) = 2*J1(pi*x)/(pi*x), with jinc(0) defined as 1. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=(2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha));
          return(gamma);
        }
#endif
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleNCompare(expression,"ln",2) == 0)
        {
          /* ln(x): natural logarithm. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
            exception);
          return(log((double) alpha));
        }
      if (LocaleNCompare(expression,"logtwo",6) == 0)
        {
          /* logtwo(x): base-2 logarithm via the change-of-base identity. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta,
            exception);
          return(log10((double) alpha))/log10(2.0);
        }
      if (LocaleNCompare(expression,"log",3) == 0)
        {
          /* log(x): base-10 logarithm. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(log10((double) alpha));
        }
      if (LocaleCompare(expression,"lightness") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleCompare(expression,"MaxRGB") == 0)
        return((double) QuantumRange);
      /* "maxima"/"minima" are handled elsewhere; fall out of the switch so
         they are not mistaken for max()/min(). */
      if (LocaleNCompare(expression,"maxima",6) == 0)
        break;
      if (LocaleNCompare(expression,"max",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(alpha > *beta ? alpha : *beta);
        }
      if (LocaleNCompare(expression,"minima",6) == 0)
        break;
      if (LocaleNCompare(expression,"min",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(alpha < *beta ? alpha : *beta);
        }
      if (LocaleNCompare(expression,"mod",3) == 0)
        {
          /* mod(a,b): floating-point modulus a-floor(a/b)*b. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          gamma=alpha-floor((double) (alpha/(*beta)))*(*beta);
          return(gamma);
        }
      if (LocaleCompare(expression,"m") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleNCompare(expression,"not",3) == 0)
        {
          /* not(x): logical negation; true when |x| is below epsilon. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((double) (alpha < MagickEpsilon));
        }
      if (LocaleCompare(expression,"n") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(expression,"Opaque") == 0)
        return(1.0);
      if (LocaleCompare(expression,"o") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(expression,"phi") == 0)
        return(MagickPHI);
      if (LocaleCompare(expression,"pi") == 0)
        return(MagickPI);
      if (LocaleNCompare(expression,"pow",3) == 0)
        {
          /* pow(a,b): a raised to the power b. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(pow((double) alpha,(double) *beta));
        }
      if (LocaleCompare(expression,"p") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(expression,"QuantumRange") == 0)
        return((double) QuantumRange);
      if (LocaleCompare(expression,"QuantumScale") == 0)
        return(QuantumScale);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleNCompare(expression,"rand",4) == 0)
        return(GetPseudoRandomValue(fx_info->random_info));
      if (LocaleNCompare(expression,"round",5) == 0)
        {
          /* round(x): round half away from zero via floor(x+0.5). */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return(floor((double) alpha+0.5));
        }
      if (LocaleCompare(expression,"r") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(expression,"saturation") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"sign",4) == 0)
        {
          /* sign(x): -1 for negative x, +1 otherwise. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return(alpha < 0.0 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"sinc",4) == 0)
        {
          /* sinc(x) = sin(pi*x)/(pi*x), with sinc(0) defined as 1. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          if (alpha == 0)
            return(1.0);
          gamma=(sin((double) (MagickPI*alpha))/(MagickPI*alpha));
          return(gamma);
        }
      if (LocaleNCompare(expression,"sinh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return(sinh((double) alpha));
        }
      if (LocaleNCompare(expression,"sin",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(sin((double) alpha));
        }
      if (LocaleNCompare(expression,"sqrt",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return(sqrt((double) alpha));
        }
      if (LocaleNCompare(expression,"squish",6) == 0)
        {
          /* squish(x): logistic sigmoid 1/(1+exp(-x)). */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta,
            exception);
          return((1.0/(1.0+exp((double) (-alpha)))));
        }
      if (LocaleCompare(expression,"s") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleNCompare(expression,"tanh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return(tanh((double) alpha));
        }
      if (LocaleNCompare(expression,"tan",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(tan((double) alpha));
        }
      if (LocaleCompare(expression,"Transparent") == 0)
        return(0.0);
      if (LocaleNCompare(expression,"trunc",5) == 0)
        {
          /* trunc(x): truncate toward zero. */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          if (alpha >= 0.0)
            return(floor((double) alpha));
          return(ceil((double) alpha));
        }
      if (LocaleCompare(expression,"t") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /* while(c,e): re-evaluate until the condition is ~zero; the loop
             result is whatever *beta was left at by the last evaluation. */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
              beta,exception);
          } while (fabs((double) alpha) >= MagickEpsilon);
          return(*beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a recognized function name: try to parse a numeric constant with an
    optional SI prefix; if nothing was consumed, fall back to symbol lookup.
  */
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
  return(alpha);
}

/*
  Evaluate the Fx expression for the gray channel at pixel (0,0).
*/
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  return(status);
}

/*
  Evaluate the expression once with the debug file detached, so that
  validating an expression (e.g. when acquiring a thread set) produces no
  trace output.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  /* Temporarily detach the debug file; restore it afterwards. */
  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=file;
  return(status);
}

/*
  Evaluate the Fx expression for one channel at pixel (x,y); the result is
  returned in *alpha.  Returns MagickFalse only when evaluation raised an
  OptionError exception.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,double *alpha,
  ExceptionInfo *exception)
{
  double
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&beta,
    exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%      Image *FxImageChannel(const Image *image,const ChannelType channel,
%        const char *expression,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Destroy a per-thread FxInfo set and release the array itself.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  Acquire one FxInfo per worker thread so the expression can be evaluated in
  parallel; each is pre-validated with FxPreprocessExpression().  Returns
  NULL (after cleanup) on allocation or preprocessing failure.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  /* A leading '@' means the expression is read from a file. */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  /* A partially-constructed set means a thread failed: tear it all down. */
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}

/*
  Convenience wrapper: apply the expression to the gray channel.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  Image
    *fx_image;

  fx_image=FxImageChannel(image,GrayChannel,expression,exception);
  return(fx_image);
}

/*
  Evaluate the Fx expression at every pixel of a clone of the image, for
  each channel selected in the channel mask.  Returns the new image, or
  NULL on failure.
*/
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view;

  FxInfo
    **restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      alpha;

    register IndexPacket
      *restrict fx_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    alpha=0.0;
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      /* Each thread uses its own FxInfo (fx_info[id]); the expression result
         in [0,1] is scaled to the quantum range. */
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /* Without a matte channel, store the value directly; with one,
             invert so the expression result reads as alpha, not opacity. */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) QuantumRange*
              alpha));
          else
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) (QuantumRange-
              QuantumRange*alpha)));
        }
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType)
            QuantumRange*alpha));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImageChannel)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I m p l o d e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing
%  one with the image pixels "implode" by the specified percentage.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is implode.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount: Define the extent of the implosion.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"

  CacheView
    *image_view,
    *implode_view;

  double
    radius;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  implode_image=CloneImage(image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&implode_image->exception);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  if (implode_image->background_color.opacity != OpaqueOpacity)
    implode_image->matte=MagickTrue;
  /*
    Compute scaling factor.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  /* The implosion acts inside a circle whose radius is half the shorter
     dimension; the longer axis is scaled to make the region circular. */
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(implode_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,implode_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    MagickPixelPacket
      pixel;

    PointInfo
      delta;

    register IndexPacket
      *restrict implode_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            factor;

          /*
            Implode the pixel: sample the source at a radially-warped
            position; the warp strength grows toward the center.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin((double) (MagickPI*sqrt((double) distance)/
              radius/2)),-amount);
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+
            center.x),(double) (factor*delta.y/scale.y+center.y),&pixel,
            exception);
          SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImage method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames:  Define the number of in-between image to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *MorphImages(const Image *image, const size_t number_frames,ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t i; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (i=1; i < (ssize_t) number_frames; i++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (i=0; i < (ssize_t) number_frames; i++) { CacheView *image_view, *morph_view; beta=(double) (i+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha* next->rows+beta*GetNextImageInList(next)->rows+0.5), next->filter,next->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse) { InheritException(exception,&morph_image->exception); morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter, GetNextImageInList(next)->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { SetPixelRed(q,ClampToQuantum(alpha* 
GetPixelRed(q)+beta*GetPixelRed(p))); SetPixelGreen(q,ClampToQuantum(alpha* GetPixelGreen(q)+beta*GetPixelGreen(p))); SetPixelBlue(q,ClampToQuantum(alpha* GetPixelBlue(q)+beta*GetPixelBlue(p))); SetPixelOpacity(q,ClampToQuantum(alpha* GetPixelOpacity(q)+beta*GetPixelOpacity(p))); p++; q++; } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (i < (ssize_t) number_frames) break; /* Clone last frame in sequence. */ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth) % % A description of each parameter follows: % % o image: the image. 
%
%    o segment: Define the region to apply plasma fractals values.
%
%    o attenuate: Define the plasma attenuation factor.
%
%    o depth: Limit the plasma recursion depth.
%
*/

/*
  Perturb a pixel value by uniform random noise in [-noise/2,+noise/2),
  clamped to the quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const MagickRealType pixel,const double noise)
{
  Quantum
    plasma;

  plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  return(plasma);
}

/*
  Recursive worker for PlasmaImage(): subdivide the segment into quadrants
  while depth remains, then set edge and middle pixels to the average of
  their neighbors plus attenuated random noise.
*/
MagickExport MagickBooleanType PlasmaImageProxy(Image *image,
  CacheView *image_view,RandomInfo *random_info,const SegmentInfo *segment,
  size_t attenuate,size_t depth)
{
  ExceptionInfo
    *exception;

  double
    plasma;

  PixelPacket
    u,
    v;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  if (((segment->x2-segment->x1) == 0.0) && ((segment->y2-segment->y1) == 0.0))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      return(PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth));
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((segment->x1 == (double) x_mid) && (segment->x2 == (double) x_mid) &&
      (segment->y1 == (double) y_mid) && (segment->y2 == (double) y_mid))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  The noise amplitude shrinks as the
    attenuation accumulated during recursion grows.
  */
  exception=(&image->exception);
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((segment->x1 != (double) x_mid) || (segment->x2 != (double) x_mid))
    {
      register PixelPacket
        *restrict q;

      /*
        Left pixel: average of the segment's top-left and bottom-left corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
        ceil(segment->y1-0.5),&u,exception);
      (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
        ceil(segment->y2-0.5),&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
        v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/
        2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (segment->x1 != segment->x2)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
            ceil(segment->y1-0.5),&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
            ceil(segment->y2-0.5),&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/
            2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((segment->y1 != (double) y_mid) || (segment->y2 != (double) y_mid))
    {
      /* NOTE(review): this guard mixes x1 with y2 — looks suspicious
         compared to the symmetric left/right guard; confirm against
         project history before changing. */
      if ((segment->x1 != (double) x_mid) || (segment->y2 != (double) y_mid))
        {
          register PixelPacket
            *restrict q;

          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/
            2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (segment->y1 != segment->y2)
        {
          register PixelPacket
            *restrict q;

          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+
            v.red)/2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((segment->x1 != segment->x2) || (segment->y1 != segment->y2))
    {
      register PixelPacket
        *restrict q;

      /*
        Middle pixel.
*/ x=(ssize_t) ceil(segment->x1-0.5); y=(ssize_t) ceil(segment->y1-0.5); (void) GetOneVirtualPixel(image,x,y,&u,exception); x=(ssize_t) ceil(segment->x2-0.5); y=(ssize_t) ceil(segment->y2-0.5); (void) GetOneCacheViewVirtualPixel(image_view,x,y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0, plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+ v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/ 2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (((segment->x2-segment->x1) < 3.0) && ((segment->y2-segment->y1) < 3.0)) return(MagickTrue); return(MagickFalse); } MagickExport MagickBooleanType PlasmaImage(Image *image, const SegmentInfo *segment,size_t attenuate,size_t depth) { CacheView *image_view; MagickBooleanType status; RandomInfo *random_info; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image_view=AcquireVirtualCacheView(image,&image->exception); random_info=AcquireRandomInfo(); status=PlasmaImageProxy(image,image_view,random_info,segment,attenuate,depth); random_info=DestroyRandomInfo(random_info); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l a r o i d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolaroidImage() simulates a Polaroid picture. 
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const double angle,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;

  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Border width: 1/25th of the larger dimension, at least 10 pixels. */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption,
        geometry[MaxTextExtent];

      DrawInfo
        *annotate_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
        value);
      (void) CloneString(&annotate_info->text,caption);
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &caption);
      /* Size the caption image to fit the wrapped line count. */
      status=SetImageExtent(caption_image,image->columns,(size_t)
        ((count+1)*(metrics.ascent-metrics.descent)+0.5));
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image);
          (void) CloneString(&annotate_info->text,caption);
          (void) FormatLocaleString(geometry,MaxTextExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      caption=DestroyString(caption);
    }
  /*
    Compose the picture: original plus border, with the caption below.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image);
  (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,OverCompositeOp,caption_image,
        quantum,(ssize_t) (image->rows+3*quantum/2));
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorDatabase("none",&picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
  /*
    Bend the picture: rotate 90 degrees, wave, rotate back.  Each step
    consumes the previous image (ownership transfers down the chain).
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  InheritException(&bend_image->exception,exception);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Add a drop shadow, flop, and composite the picture over it.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage returns NULL, so this returns NULL. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
    (ssize_t) (-0.01*picture_image->columns/2.0),0L);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
  /*
    Rotate to the requested angle and trim the excess background.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.
Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sepia_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sepia_image->exception);
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      /*
        Per-channel tone map driven by pixel intensity: red saturates above
        threshold, green above 7/6*threshold, blue darkens below threshold/6.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(q,ClampToQuantum(tone));
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(q,ClampToQuantum(tone));
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(q,ClampToQuantum(tone));
      /* Floor green and blue at threshold/7 to keep the sepia cast. */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(q) < tone)
        SetPixelGreen(q,ClampToQuantum(tone));
      if ((double) GetPixelBlue(q) < tone)
        SetPixelBlue(q,ClampToQuantum(tone));
      SetPixelOpacity(q,GetPixelOpacity(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image);
  (void) ContrastImage(sepia_image,MagickTrue);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d o w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadowImage() simulates a shadow from the specified image and returns it.
%
%  The format of the ShadowImage method is:
%
%      Image *ShadowImage(const Image *image,const double opacity,
%        const double sigma,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: percentage transparency.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x_offset: the shadow x-offset.
%
%    o y_offset: the shadow y-offset.
%
%    o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
  clone_image->compose=OverCompositeOp;
  /*
    Pad with a transparent border wide enough for the blur (2*sigma, rounded).
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorDatabase("none",&clone_image->border_color,exception);
  border_image=BorderImage(clone_image,&border_info,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
  /*
    Shadow image: flatten color to the background and scale alpha by opacity.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(border_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(border_image,border_image,border_image->rows,1)
#endif
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      SetPixelRed(q,border_image->background_color.red);
      SetPixelGreen(q,border_image->background_color.green);
      SetPixelBlue(q,border_image->background_color.blue);
      if (border_image->matte == MagickFalse)
        SetPixelOpacity(q,border_image->background_color.opacity);
      else
        SetPixelOpacity(q,ClampToQuantum((double) (QuantumRange-
          GetPixelAlpha(q)*opacity/100.0)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadowImage)
#endif
        proceed=SetImageProgress(image,ShadowImageTag,progress++,
          border_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Blur only the alpha channel to soften the shadow edge, then offset the
    page geometry so the shadow lands at (x_offset,y_offset).
  */
  shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SketchImage() selects a suitable radius for you.  Angle gives the angle
%  of the sketch.
%
%  The format of the SketchImage method is:
%
%      Image *SketchImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image: start from a double-sized field of random gray noise.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    NOTE(review): random_view is acquired here and acquired AGAIN inside the
    non-accelerated branch below before this handle is released; the first
    view appears to leak on that path -- confirm against upstream.
  */
  random_view=AcquireAuthenticCacheView(random_image,exception);
  if (AccelerateRandomImage(random_image,exception) ==MagickFalse)
    {
      status=MagickTrue;
      GetMagickPixelPacket(random_image,&zero);
      random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
#endif
      random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) random_image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickPixelPacket
          pixel;

        register IndexPacket
          *restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(random_view);
        pixel=zero;
        for (x=0; x < (ssize_t) random_image->columns; x++)
        {
          /* Gray noise: identical random value on all channels. */
          pixel.red=(MagickRealType) (QuantumRange*
            GetPseudoRandomValue(random_info[id]));
          pixel.green=pixel.red;
          pixel.blue=pixel.red;
          if (image->colorspace == CMYKColorspace)
            pixel.index=pixel.red;
          SetPixelPacket(random_image,&pixel,q,indexes+x);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      random_info=DestroyRandomInfoThreadSet(random_info);
      if (status == MagickFalse)
        {
          random_view=DestroyCacheView(random_view);
          /* DestroyImage() returns NULL, so this returns NULL. */
          random_image=DestroyImage(random_image);
          return(random_image);
        }
    }
  random_view=DestroyCacheView(random_view);
  /*
    Streak the noise along the sketch angle, edge-detect, invert, and
    normalize to produce the pencil-stroke layer.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  /*
    Blend 20% of the original color back in (compose:args "20x80").
  */
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S o l a r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SolarizeImage() applies a special effect to the image, similar to the effect
%  achieved in a photo darkroom by selectively exposing areas of photo
%  sensitive paper to light.  Threshold ranges from 0 to QuantumRange and is a
%  measure of the extent of the solarization.
%
%  The format of the SolarizeImage method is:
%
%      MagickBooleanType SolarizeImage(Image *image,const double threshold)
%      MagickBooleanType SolarizeImageChannel(Image *image,
%        const ChannelType channel,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: Define the extent of the solarization.
%
%    o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: solarize all default channels. */
  status=SolarizeImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType SolarizeImageChannel(Image *image,
  const ChannelType channel,const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap: invert any selected channel above the threshold.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          if ((double) image->colormap[i].red > threshold)
            image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          if ((double) image->colormap[i].green > threshold)
            image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          if ((double) image->colormap[i].blue > threshold)
            image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: same channel inversion applied to the pixels in place.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        if ((double) GetPixelRed(q) > threshold)
          SetPixelRed(q,QuantumRange-GetPixelRed(q));
      if ((channel & GreenChannel) != 0)
        if ((double) GetPixelGreen(q) > threshold)
          SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
      if ((channel & BlueChannel) != 0)
        if ((double) GetPixelBlue(q) > threshold)
          SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark. 
% % The format of the SteganoImage method is: % % Image *SteganoImage(const Image *image,Image *watermark, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o watermark: the watermark image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelPacket pixel; register PixelPacket *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse) { InheritException(exception,&stegano_image->exception); stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; /* Hide watermark in low-order bits of image. 
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception); if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (PixelPacket *) NULL) break; switch (c) { case 0: { SetBit(GetPixelRed(q),j,GetBit(ClampToQuantum(GetPixelIntensity( image,&pixel)),i)); break; } case 1: { SetBit(GetPixelGreen(q),j,GetBit(ClampToQuantum(GetPixelIntensity( image,&pixel)),i)); break; } case 2: { SetBit(GetPixelBlue(q),j,GetBit(ClampToQuantum(GetPixelIntensity( image,&pixel)),i)); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (stegano_image->storage_class == PseudoClass) (void) SyncImage(stegano_image); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y p h I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StereoAnaglyphImage() combines two images and produces a single image that
%  is the composite of a left and right image of a stereo pair.  Special
%  red-green stereo glasses are required to view this effect.
%
%  The format of the StereoAnaglyphImage method is:
%
%      Image *StereoImage(const Image *left_image,const Image *right_image,
%        ExceptionInfo *exception)
%      Image *StereoAnaglyphImage(const Image *left_image,
%        const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o left_image: the left image.
%
%    o right_image: the right image.
%
%    o exception: return any errors or warnings in this structure.
%
%    o x_offset: amount, in pixels, by which the left image is offset to the
%      right of the right image.
%
%    o y_offset: amount, in pixels, by which the left image is offset to the
%      bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /* Anaglyph with no parallax offset. */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}

MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* (redundant re-assert retained from the original source) */
  assert(right_image != (const Image *) NULL);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    register PixelPacket
      *restrict r;

    /* Left image is sampled shifted by (x_offset,y_offset) for parallax. */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
        (r == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(r,GetPixelRed(p));
      SetPixelGreen(r,GetPixelGreen(q));
      SetPixelBlue(r,GetPixelBlue(q));
      SetPixelOpacity(r,(GetPixelOpacity(p)+q->opacity)/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S w i r l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SwirlImage() swirls the pixels about the center of the image, 
where
%  degrees indicates the sweep of the arc through which each pixel is moved.
%  You get a more dramatic effect as the degrees move from 1 to 360.
%
%  The format of the SwirlImage method is:
%
%      Image *SwirlImage(const Image *image,double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *image_view,
    *swirl_view;

  double
    radius;

  Image
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  swirl_image=CloneImage(image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&swirl_image->exception);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.opacity != OpaqueOpacity)
    swirl_image->matte=MagickTrue;
  /*
    Compute scaling factor: normalize the shorter axis so the swirl region
    is an ellipse inscribed in the image.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(swirl_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,swirl_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    MagickPixelPacket
      pixel;

    PointInfo
      delta;

    register IndexPacket
      *restrict swirl_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation angle falls off quadratically with
            distance from the center, sampled by inverse mapping.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
            scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
            center.y),&pixel,exception);
          SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the image.  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
%  The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
%  The format of the TintImage method is:
%
%      Image *TintImage(const Image *image,const char *opacity,
%        const PixelPacket tint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: A color value used for tinting.
%
%    o tint: A color value used for tinting.
%
%    o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
  const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    color_vector,
    pixel;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&tint_image->exception);
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelGray(&tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace);
  /* With no opacity geometry, the clone is returned untinted. */
  if (opacity == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the tint color.  The opacity string is a
    geometry of per-channel percentages (rho/sigma/xi/psi).
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
    PixelPacketIntensity(&tint));
  color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
    PixelPacketIntensity(&tint));
  color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
    PixelPacketIntensity(&tint));
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        weight;

      MagickPixelPacket
        pixel;

      /*
        Midtone weighting f(w)=1-4*(w-0.5)^2: full tint at midtones, none at
        black or white, applied independently per channel.
      */
      weight=QuantumScale*GetPixelRed(p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      SetPixelRed(q,ClampToQuantum(pixel.red));
      weight=QuantumScale*GetPixelGreen(p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(p)+color_vector.green*(1.0-
        (4.0*(weight*weight)));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      weight=QuantumScale*GetPixelBlue(p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      SetPixelOpacity(q,GetPixelOpacity(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%      Image *VignetteImage(const Image *image,const double radius,
%        const double sigma,const ssize_t x,const ssize_t y,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MaxTextExtent];

  DrawInfo
    *draw_info;

  Image
    *blur_image,
    *canvas_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* canvas_image: a working copy of the source that will receive the mask. */
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&canvas_image->exception);
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  canvas_image->matte=MagickTrue;
  /* oval_image: a white ellipse drawn on a black background; it becomes the
     opacity mask after blurring. */
  oval_image=CloneImage(canvas_image,canvas_image->columns,canvas_image->rows,
    MagickTrue,exception);
  if (oval_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  (void) QueryColorDatabase("#000000",&oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
  (void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
  /* Ellipse is centered on the image; x/y shrink its radii. */
  (void) FormatLocaleString(ellipse,MaxTextExtent,
    "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
    image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  /* Soften the mask edge with a Gaussian blur. */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  blur_image->matte=MagickFalse;
  /* Copy the blurred mask into the canvas's opacity channel, then flatten
     against the background to produce the final vignette. */
  (void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
  canvas_image=DestroyImage(canvas_image);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength is specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%      Image *WaveImage(const Image *image,const double amplitude,
%        const double wave_length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length: Define the amplitude and wave length of the
%      sine wave.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag  "Wave/Image"

  CacheView
    *image_view,
    *wave_view;

  Image
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The clone gets 2*|amplitude| extra rows so displaced pixels fit. */
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&wave_image->exception);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  if (wave_image->background_color.opacity != OpaqueOpacity)
    wave_image->matte=MagickTrue;
  /*
    Allocate sine map.
  */
  sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (MagickRealType *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute the per-column vertical offset: |A| + A*sin(2*pi*i/lambda). */
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(wave_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /* Off-image samples resolve to the background color. */
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(wave_view);
    pixel=zero;
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* Sample the source shifted up by this column's sine offset. */
      (void) InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
        exception);
      SetPixelPacket(wave_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
linAlgNorm2.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(norm2)(const dlong & Nblocks, const dlong & N, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ normA){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for reduction(+:wa2) #endif for(int i=0;i<N;++i){ const dfloat ai = cpu_a[i]; wa2 += ai*ai; } normA[0] = wa2; } extern "C" void FUNC(norm2Many)(const dlong & Nblocks, const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ normA){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) reduction(+:wa2) #endif for(int fld=0;fld<Nfields;fld++) { for(int i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; wa2 += ai*ai; } } normA[0] = wa2; }
ocp_nlp_sqp_rti.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.; */ #include "acados/ocp_nlp/ocp_nlp_sqp_rti.h" // external #include <assert.h> #include <math.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif // blasfeo #include "blasfeo/include/blasfeo_d_aux.h" #include "blasfeo/include/blasfeo_d_aux_ext_dep.h" #include "blasfeo/include/blasfeo_d_blas.h" // acados #include "acados/ocp_nlp/ocp_nlp_common.h" #include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h" #include "acados/ocp_nlp/ocp_nlp_reg_common.h" #include "acados/ocp_qp/ocp_qp_common.h" #include "acados/utils/mem.h" #include "acados/utils/print.h" #include "acados/utils/timing.h" #include "acados/utils/types.h" #include "acados_c/ocp_qp_interface.h" /************************************************ * options ************************************************/ acados_size_t ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; acados_size_t size = 0; size += sizeof(ocp_nlp_sqp_rti_opts); size += ocp_nlp_opts_calculate_size(config, dims); return size; } void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; char *c_ptr = (char *) raw_memory; ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_rti_opts); opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr); c_ptr += 
ocp_nlp_opts_calculate_size(config, dims); assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config, dims) >= c_ptr); return opts; } void ocp_nlp_sqp_rti_opts_initialize_default(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_constraints_config **constraints = config->constraints; // int ii; // int N = dims->N; // this first !!! ocp_nlp_opts_initialize_default(config, dims, nlp_opts); // SQP RTI opts opts->ext_qp_res = 0; opts->warm_start_first_qp = false; opts->rti_phase = 0; opts->print_level = 0; // overwrite default submodules opts // do not compute adjoint in dynamics and constraints // int compute_adj = 0; // // dynamics // for (ii = 0; ii < N; ii++) // { // dynamics[ii]->opts_set(dynamics[ii], // opts->nlp_opts->dynamics[ii], "compute_adj", &compute_adj); // } // // constraints // for (ii = 0; ii <= N; ii++) // { // constraints[ii]->opts_set(constraints[ii], // opts->nlp_opts->constraints[ii], "compute_adj", &compute_adj); // } return; } void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_opts_update(config, dims, nlp_opts); return; } void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_; ocp_nlp_config *config = config_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; int ii; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if (char_!=NULL) { module_length = char_-field; for (ii=0; ii<module_length; ii++) module[ii] = field[ii]; 
module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to QP module if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) ) { ocp_nlp_opts_set(config, nlp_opts, field, value); if (!strcmp(field, "qp_warm_start")) { int* i_ptr = (int *) value; opts->qp_warm_start = *i_ptr; } } else // nlp opts { if (!strcmp(field, "ext_qp_res")) { int* ext_qp_res = (int *) value; opts->ext_qp_res = *ext_qp_res; } else if (!strcmp(field, "warm_start_first_qp")) { bool* warm_start_first_qp = (bool *) value; opts->warm_start_first_qp = *warm_start_first_qp; } else if (!strcmp(field, "rti_phase")) { int* rti_phase = (int *) value; if (*rti_phase < 0 || *rti_phase > 2) { printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field."); printf("possible values are: 0, 1, 2\n"); exit(1); } else opts->rti_phase = *rti_phase; } else if (!strcmp(field, "print_level")) { int* print_level = (int *) value; if (*print_level < 0) { printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level); exit(1); } opts->print_level = *print_level; } else { ocp_nlp_opts_set(config, nlp_opts, field, value); } } return; } void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_, size_t stage, const char *field, void* value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value); } /************************************************ * memory ************************************************/ acados_size_t ocp_nlp_sqp_rti_memory_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_cost_config 
**cost = config->cost; // ocp_nlp_constraints_config **constraints = config->constraints; // int N = dims->N; // int *nx = dims->nx; // int *nu = dims->nu; // int *nz = dims->nz; acados_size_t size = 0; size += sizeof(ocp_nlp_sqp_rti_memory); // nlp mem size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts); // stat int stat_m = 1+1; int stat_n = 2; if (opts->ext_qp_res) stat_n += 4; size += stat_n*stat_m*sizeof(double); size += 8; // initial align make_int_multiple_of(8, &size); return size; } void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_cost_config **cost = config->cost; // ocp_nlp_constraints_config **constraints = config->constraints; char *c_ptr = (char *) raw_memory; // int ii; // int N = dims->N; // int *nx = dims->nx; // int *nu = dims->nu; // int *nz = dims->nz; // initial align align_char_to(8, &c_ptr); ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_rti_memory); // nlp mem mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr); c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts); // stat mem->stat = (double *) c_ptr; mem->stat_m = 1+1; mem->stat_n = 2; if (opts->ext_qp_res) mem->stat_n += 4; c_ptr += mem->stat_m*mem->stat_n*sizeof(double); mem->status = ACADOS_READY; assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size( config, dims, opts) >= c_ptr); return mem; } /************************************************ * workspace ************************************************/ acados_size_t ocp_nlp_sqp_rti_workspace_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts 
*opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; acados_size_t size = 0; // sqp size += sizeof(ocp_nlp_sqp_rti_workspace); // nlp size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts); // qp in size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); if (opts->ext_qp_res) { // qp res size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims); // qp res ws size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims); } return size; } static void ocp_nlp_sqp_rti_cast_workspace( ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work) { ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_memory *nlp_mem = mem->nlp_mem; // sqp char *c_ptr = (char *) work; c_ptr += sizeof(ocp_nlp_sqp_rti_workspace); // nlp work->nlp_work = ocp_nlp_workspace_assign( config, dims, nlp_opts, nlp_mem, c_ptr); c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts); // qp in work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); if (opts->ext_qp_res) { // qp res work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims); // qp res ws work->qp_res_ws = ocp_qp_res_workspace_assign( dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_res_workspace_calculate_size( dims->qp_solver->orig_dims); } assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config, dims, opts) >= c_ptr); return; } /************************************************ * functions ************************************************/ int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, 
void *work_) { ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_sqp_rti_memory *mem = mem_; // zero timers acados_timer timer0; double total_time = 0.0; mem->time_tot = 0.0; ocp_nlp_sqp_rti_opts *nlp_opts = opts_; int rti_phase = nlp_opts->rti_phase; acados_tic(&timer0); switch(rti_phase) { // perform preparation and feedback rti_phase case 0: ocp_nlp_sqp_rti_preparation_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); ocp_nlp_sqp_rti_feedback_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; // perform preparation rti_phase case 1: ocp_nlp_sqp_rti_preparation_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; // perform feedback rti_phase case 2: ocp_nlp_sqp_rti_feedback_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; } total_time += acados_toc(&timer0); mem->time_tot = total_time; nlp_out->total_time = total_time; return mem->status; } void ocp_nlp_sqp_rti_preparation_step(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { acados_timer timer1; ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; mem->time_lin = 0.0; mem->time_reg = 0.0; int N = dims->N; int ii; #if defined(ACADOS_WITH_OPENMP) // backup number of threads int num_threads_bkp = omp_get_num_threads(); // set number of threads omp_set_num_threads(opts->nlp_opts->num_threads); #pragma omp parallel { // beginning of parallel region #endif // alias to dynamics_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii < N; ii++) { 
config->dynamics[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_ux1_ptr( nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_ux1_ptr( nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_pi_ptr( nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_pi_ptr( nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_BAbt_ptr( nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_dzduxt_ptr( nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_sim_guess_ptr( nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]); } // alias to cost_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii <= N; ii++) { config->cost[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_dzdux_tran_ptr( nlp_mem->dzduxt+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_Z_ptr( nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]); } // alias to constraints_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii <= N; ii++) { config->constraints[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]); 
config->constraints[ii]->memory_set_lam_ptr( nlp_out->lam+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_tmp_lam_ptr( nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_dzdux_tran_ptr( nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_DCt_ptr( nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxb_ptr( nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxs_rev_ptr( nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxe_ptr( nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]); } // alias to regularize memory config->regularize->memory_set_RSQrq_ptr( dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem); config->regularize->memory_set_rq_ptr( dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem); config->regularize->memory_set_BAbt_ptr( dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem); config->regularize->memory_set_b_ptr( dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem); config->regularize->memory_set_idxb_ptr( dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem); config->regularize->memory_set_DCt_ptr( dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem); config->regularize->memory_set_ux_ptr( dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem); config->regularize->memory_set_pi_ptr( dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem); config->regularize->memory_set_lam_ptr( dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem); // copy sampling times into dynamics model #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif // NOTE(oj): this will 
lead in an error for irk_gnsf, T must be set in precompute; // -> remove here and make sure precompute is called everywhere (e.g. Python interface). for (ii = 0; ii < N; ii++) { config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); } #if defined(ACADOS_WITH_OPENMP) } // end of parallel region #endif // initialize QP ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); /* SQP body */ int sqp_iter = 0; nlp_mem->sqp_iter = &sqp_iter; // linearizate NLP and update QP matrices acados_tic(&timer1); ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); mem->time_lin += acados_toc(&timer1); #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif return; } void ocp_nlp_sqp_rti_feedback_step(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { acados_timer timer1; ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int qp_iter = 0; int qp_status = 0; double tmp_time; mem->time_qp_sol = 0.0; mem->time_qp_solver_call = 0.0; mem->time_qp_xcond = 0.0; mem->time_glob = 0.0; // embed initial value (this actually updates all bounds at stage 0...) 
ocp_nlp_embed_initial_value(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // update QP rhs for SQP (step prim var, abs dual var) ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // regularize Hessian acados_tic(&timer1); config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); if (opts->print_level > 0) { printf("\n------- qp_in --------\n"); print_ocp_qp_in(nlp_mem->qp_in); } if (!opts->warm_start_first_qp) { int tmp_int = 0; config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int); } // solve qp acados_tic(&timer1); qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); mem->time_qp_sol += acados_toc(&timer1); qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time); mem->time_qp_solver_call += tmp_time; qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time); mem->time_qp_xcond += tmp_time; // compute correct dual solution in case of Hessian regularization acados_tic(&timer1); config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); // TODO move into QP solver memory ??? 
    /* ---- continuation of the SQP-RTI solve routine (function head above) ----
     * Post-QP phase: extract QP iteration count, optionally compute external
     * QP residuals, record statistics, handle QP failure, then globalize
     * (line search) and update the primal/dual NLP variables. */
    // TODO move into QP solver memory ???
    qp_info *qp_info_;
    ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
    nlp_out->qp_iter = qp_info_->num_iter;
    qp_iter = qp_info_->num_iter;

    // compute external QP residuals (for debugging)
    if (opts->ext_qp_res)
    {
        ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
        // residual inf-norms are stored in row 1 of the statistics table, columns 2..5
        ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2));
        // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter,
        // inf_norm_qp_res[0], inf_norm_qp_res[1],
        // inf_norm_qp_res[2], inf_norm_qp_res[3]);
    }

    // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter);
    // print_ocp_qp_out(nlp_mem->qp_out);
    // exit(1);

    // save statistics (row 1: QP status and iteration count)
    mem->stat[mem->stat_n*1+0] = qp_status;
    mem->stat[mem->stat_n*1+1] = qp_iter;

    // NOTE(review): bitwise '&' on boolean operands — works here because both
    // comparisons yield 0/1, but '&&' would be the idiomatic choice.
    if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
    {
        // print_ocp_qp_in(mem->qp_in);
#ifndef ACADOS_SILENT
        printf("QP solver returned error status %d\n", qp_status);
#endif
        mem->status = ACADOS_QP_FAILURE;
        return;
    }

    // globalization: compute step size alpha via line search
    acados_tic(&timer1);
    double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    mem->time_glob += acados_toc(&timer1);

    // update variables (primal step scaled by alpha)
    ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha);

    // ocp_nlp_dims_print(nlp_out->dims);
    // ocp_nlp_out_print(nlp_out);
    // exit(1);

    // print_ocp_qp_in(mem->qp_in);

    mem->status = ACADOS_SUCCESS;
}


/* Precompute: validates slack dimensions against the constraint modules and
 * runs the per-stage dynamics precomputation (integrator setup).
 * Returns ACADOS_SUCCESS or the first failing dynamics status. */
int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_sqp_rti_workspace *work = work_;

    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int N = dims->N;
    int status = ACADOS_SUCCESS;

    int ii;

    // TODO(giaf) flag to enable/disable checks
    // sanity check: "ns" stored in dims must match each constraint module
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_rti_precompute: inconsistent dimension ns \
for stage %d with constraint module, got %d, module: %d.", ii, dims->ns[ii], module_val);
            exit(1);
        }
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T (stage sampling time for the integrator)
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute; abort on first failure
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii],
            nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS)
            return status;
    }

    return status;
}


/* Parametric sensitivity of the QP solution: currently only supports
 * field "ex" (initial state) at stage 0. Sets a unit RHS on the requested
 * bound, re-solves the sensitivity system, and copies the result
 * (ux, pi, lam, t) into sens_nlp_out. */
void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_,
    void *work_, char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
    ocp_nlp_sqp_rti_workspace *work = work_;

    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // work on a copy of the last QP with a zeroed right-hand side
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    // NOTE(review): bitwise '&' — safe on these 0/1 operands, but '&&' is idiomatic.
    if ((!strcmp("ex", field)) & (stage==0))
    {
        // seed: unit perturbation on the index-th initial-state bound
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);

        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in,
            work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem,
            nlp_work->qp_work);

        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);

        /* copy tmp_qp_out into sens_nlp_out */
        int i;
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;

        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);

            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in \
ocp_nlp_sqp_rti_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}


// TODO rename memory_get ???
/* Generic string-keyed getter for solver results, timings and statistics.
 * Writes through return_value_, whose expected type depends on the field
 * (int, double, or pointer). Unknown fields abort via exit(1). */
void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_,
    const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        int *value = return_value_;
        *value = 1;  // RTI always performs exactly one SQP iteration
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_glob", field))
    {
        double *value = return_value_;
        *value = mem->time_glob;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) ||
             !strcmp("time_sim_la", field))
    {
        // accumulate the requested simulation timing over all stages;
        // NOTE(review): *ptr is accumulated, not reset — assumes caller
        // zero-initializes the output. TODO confirm against callers.
        double tmp = 0.0;
        double *ptr = return_value_;
        int N = dims->N;
        int ii;
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii],
                mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // copy the 2-row statistics table, column-major, prefixed with the row index
        int n_row = 2;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_mem->nlp_res;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        // delegate to the QP solver's own iteration counter
        config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem,
            "iter", return_value_);
    }
    else if (!strcmp("res_stat", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_stat;
    }
    else if (!strcmp("res_eq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_eq;
    }
    else if (!strcmp("res_ineq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq;
    }
    else if (!strcmp("res_comp", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_comp;
    }
    else if (!strcmp("cost_value", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->cost_value;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_get\n", field);
        exit(1);
    }
}


/* Getter for the opts sub-structure; only "nlp_opts" is supported. */
void ocp_nlp_sqp_rti_opts_get(void *config_, void *dims_, void *opts_,
    const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    if (!strcmp("nlp_opts", field))
    {
        void **value = return_value_;
        *value = opts->nlp_opts;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_opts_get\n", field);
        exit(1);
    }
}


/* Getter for the workspace sub-structure; only "nlp_work" is supported. */
void ocp_nlp_sqp_rti_work_get(void *config_, void *dims_, void *work_,
    const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_workspace *work = work_;

    if (!strcmp("nlp_work", field))
    {
        void **value = return_value_;
        *value = work->nlp_work;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_work_get\n", field);
        exit(1);
    }
}


/* Populate the config vtable with the SQP-RTI implementations. */
void ocp_nlp_sqp_rti_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    config->opts_calculate_size = &ocp_nlp_sqp_rti_opts_calculate_size;
    config->opts_assign = &ocp_nlp_sqp_rti_opts_assign;
    config->opts_initialize_default = &ocp_nlp_sqp_rti_opts_initialize_default;
    config->opts_update = &ocp_nlp_sqp_rti_opts_update;
    config->opts_set = &ocp_nlp_sqp_rti_opts_set;
    config->opts_set_at_stage = &ocp_nlp_sqp_rti_opts_set_at_stage;
    config->memory_calculate_size = &ocp_nlp_sqp_rti_memory_calculate_size;
    config->memory_assign = &ocp_nlp_sqp_rti_memory_assign;
    config->workspace_calculate_size = &ocp_nlp_sqp_rti_workspace_calculate_size;
    config->evaluate = &ocp_nlp_sqp_rti;
    config->eval_param_sens = &ocp_nlp_sqp_rti_eval_param_sens;
    config->config_initialize_default = &ocp_nlp_sqp_rti_config_initialize_default;
    config->precompute = &ocp_nlp_sqp_rti_precompute;
    config->get = &ocp_nlp_sqp_rti_get;
    config->opts_get = &ocp_nlp_sqp_rti_opts_get;
    config->work_get = &ocp_nlp_sqp_rti_work_get;

    return;
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/ typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; MagickRealType quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; MagickRealType distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; MagickRealType weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. */ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *), SetGrayscaleImage(Image *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DefineImageColormap(Image *,CubeInfo *,NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. % % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
% */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info)); if (quantize_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither=image_info->dither; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. 
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Load a pixel into a DoublePixelPacket, premultiplying RGB by alpha when the
  cube associates alpha and the pixel is not fully opaque.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
  MagickRealType
    alpha;

  alpha_pixel->index=0;
  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->opacity == OpaqueOpacity))
    {
      /* no premultiplication needed: copy components verbatim */
      alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
      alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
      alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  /* alpha = 1 - normalized opacity; scale RGB by it */
  alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
  alpha_pixel->red=alpha*GetPixelRed(pixel);
  alpha_pixel->green=alpha*GetPixelGreen(pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}

/*
  Select the child slot (0..7, or 0..15 with alpha) for a pixel at tree depth
  'index': one bit per RGB(A) component taken from the scaled 8-bit value.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
    0x01) |
    ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) & 0x01)
      << 1 |
    ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >> index) & 0x01)
      << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
      0x1) << 3;
  return(id);
}

/*
  Exact RGB(A) equality of two pixels; opacity only matters for matte images.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if ((GetPixelRed(p) != GetPixelRed(q)) ||
      (GetPixelGreen(p) != GetPixelGreen(q)) ||
      (GetPixelBlue(p) != GetPixelBlue(q)))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  size_t
    number_colors;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;  /* remember so we can restore at the end */
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
  number_colors=MagickMax(cube_info->colors,cube_info->maximum_colors);
  if (AcquireImageColormap(image,number_colors) == MagickFalse)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* colormap is (re)built by DefineImageColormap from the pruned tree */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      MagickBooleanType
        status;

      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;  /* per-thread copy: ClosestColor mutates search state */

        IndexPacket
          *magick_restrict indexes;

        PixelPacket
          *magick_restrict q;

        ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* run-length: handle consecutive identical pixels in one lookup */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* larger than any possible squared distance in the cube */
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the two colormap entries to black/white,
        ordered by luma.
      */
      intensity=GetPixelLuma(image,image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if ((image->colors > 1) &&
          (GetPixelLuma(image,image->colormap+0) >
           GetPixelLuma(image,image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image);
  /* restore the caller's colorspace if we transformed above */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax= 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed; (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.
At each level of the tree it
%  identifies the single node which represents a cube in RGB space containing
%  the color of a particular pixel.  It updates the following data for each
%  such node:
%
%    n1 : Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2 : Number of pixels whose color is not represented in a node at lower
%    depth in the tree; initially, n2 = 0 for all nodes except leaves of the
%    tree.
%
%    Sr, Sg, Sb : Sums of the red, green, and blue component values for all
%    pixels not classified at a lower depth.  The combination of these sums
%    and n2 will ultimately characterize the mean color of a set of pixels
%    represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained within
%    a node and the nodes' center.  This represents the quantization error
%    for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  Decide whether the alpha channel participates in quantization.  Alpha is
  associated when the image has a matte channel, except for the special
  2-color grayscale case (pure bilevel output ignores alpha).
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->matte;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  MagickRealType
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
    If the requested colorspace differs from the image's, transform the image
    first (sRGB is the fallback for non-RGB-compatible inputs).
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace);
      else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        (void) TransformImageColorspace((Image *) image,sRGBColorspace);
    }
  /* Center of the full RGB(A) cube; each tree level bisects toward a pixel. */
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  midpoint.index=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    Phase 1: descend to full MaxTreeDepth until the color count exceeds
    maximum_colors, at which point the tree is pruned and phase 2 (below)
    continues at the reduced cube_info->depth.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.  Runs of identical
        pixels are classified once with a multiplicity of `count`.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        /* Move the cube center toward the octant selected by each id bit. */
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        /* NOTE(review): the root accumulates each node's running total per
           level, matching upstream behavior — not a per-pixel increment. */
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* Too many colors: prune to the working depth and fall through to
           the reduced-depth classification loop below. */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Phase 2: classify the remaining rows at the (possibly pruned) working
    depth cube_info->depth instead of MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
          pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* Restore the caller-visible colorspace if it was transformed above. */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /* Allocate and default-initialize; a NULL source yields a fresh default. */
  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  /* Copy only the user-settable fields; signature stays as set above. */
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither=quantize_info->dither;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.  16 octants when alpha participates, 8 otherwise.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;

      DoublePixelPacket
        *magick_restrict q;

      MagickRealType
        distance;

      PixelPacket
        *magick_restrict p;

      /*
        Determine if this color is "closest".  The distance is accumulated
        channel by channel so the comparison can bail out early once the
        partial sum already exceeds the best distance found so far.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      pixel=GetPixelRed(p)-GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=GetPixelGreen(p)-GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=GetPixelBlue(p)-GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Only meaningful for palette images; requantizing at the current color
     count collapses duplicates and drops unused entries. */
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the DefineImageColormap method is:
%
%      void DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        alpha;

      PixelPacket
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color accumulated by classification divided by the pixel count.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;

              /* Un-premultiply by coverage so partially transparent means
                 are not darkened. */
              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              /* Track the most transparent entry for later matte handling. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *next_queue,
    *queue;

  /*
    Release color cube tree storage: walk the list of node pages, freeing
    each page's node array and then the page record itself.
  */
  queue=cube_info->node_queue;
  do
  {
    next_queue=queue->next;
    queue->nodes=(NodeInfo *) RelinquishMagickMemory(queue->nodes);
    queue=(Nodes *) RelinquishMagickMemory(queue);
    queue=next_queue;
  } while (queue != (Nodes *) NULL);
  cube_info->node_queue=(Nodes *) NULL;
  /* The dither cache lives in virtual memory; release it if present. */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /*
    Invalidate the signature so any stale pointer trips the assert above,
    then release the structure; RelinquishMagickMemory() yields NULL, which
    is returned for the caller to assign back.
  */
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/* Free the per-thread error-diffusion row buffers allocated below. */
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one pair of row buffers (current + previous scanline, hence the
  factor of 2) per worker thread; returns NULL on allocation failure after
  releasing anything already acquired.
*/
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Map a (possibly alpha-extended) pixel to an index in the dither color
  cache by packing the top CacheShift bits of each channel.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg with a serpentine
    scan (direction alternates per row).  The 7/16, 5/16, 3/16, 1/16 weights
    appear below; the diffusion strength is tunable via the
    "dither:diffusion-amount" artifact.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  exception=(&image->exception);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* Each row works on a private copy of the cube so the shared cache and
       search state are not mutated concurrently. */
    cube=(*cube_info);
    /* Row parity selects which half of the buffer is "current". */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /* v is the serpentine step: -1 on odd rows, +1 on even rows. */
    v=(ssize_t) ((y & 0x01) ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      ssize_t
        i;

      ssize_t
        u;

      /* u is the actual column: reversed on odd rows. */
      u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(&cube,q+u,&pixel);
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=7.0*amount*current[u-v].opacity/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              /* NOTE(review): this 1/16 term does not scale by `amount`,
                 unlike the other three weights — confirm intent upstream. */
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=previous[u+v].opacity/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=5.0*amount*previous[u].opacity/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=3.0*amount*previous[u-v].opacity/16;
            }
        }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;  /* shadows the thread id above; local octant index */

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
            1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(indexes+u,index);
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q+u,image->colormap+index);
          if (cube.associate_alpha != MagickFalse)
            SetPixelOpacity(q+u,image->colormap[index].opacity);
        }
      /* NOTE(review): the sync is issued per pixel, inside the x loop —
         confirm whether a once-per-row sync was intended. */
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixel(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].opacity=pixel.opacity-color.opacity;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* NOTE(review): `status` is tracked above but the function returns
     MagickTrue unconditionally — confirm whether return(status) was
     intended. */
  return(MagickTrue);
}

static MagickBooleanType
  RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);

/*
  Recursively trace a Hilbert curve of order `level`; each RiemersmaDither()
  call dithers the pixel at the cube's current (x,y) cursor and then steps
  the cursor one pixel in the given gravity direction.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction)
{
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}

/*
  Dither the pixel under the cube's (x,y) cursor (if inside the image) using
  the weighted error queue, then advance the cursor in `direction`.  Returns
  MagickFalse on pixel-access failure or progress cancellation.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;

      IndexPacket
        *magick_restrict indexes;

      PixelPacket
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error: fold the queued errors (exponentially decaying
        weights) into the current pixel.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) (1*p->cache[i]);
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve.  The curve order
    `depth` is the smallest power of two covering max(columns, rows).
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  /* Final call flushes the pixel at the curve's end point. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t C u b e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initialize the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or one
%      tells Quantize to choose a optimal tree depth of Log4(number_colors).
%      A tree of this depth generally allows the best representation of the
%      reference image with the least amount of memory and the fastest
%      computational speed.  In some cases, such as an image with low color
%      dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.  Depth is clamped to
    [2, MaxTreeDepth].
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot per truncated RGBA key.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache (all bytes 0xFF == -1 == "empty" sentinel).
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors; any rounding residue is folded into
    weights[0] so the weights sum to exactly 1.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node;

  /*
    Nodes are carved out of page-sized arenas; grow the arena list when the
    current page is exhausted.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *node_page;

      node_page=(Nodes *) AcquireMagickMemory(sizeof(*node_page));
      if (node_page == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      node_page->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*node_page->nodes));
      if (node_page->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      /* Push the fresh page on the queue and reset the bump allocator. */
      node_page->next=cube_info->node_queue;
      cube_info->node_queue=node_page;
      cube_info->next_node=node_page->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /* Pop the next node from the current page and zero-initialize it. */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node=cube_info->next_node++;
  (void) memset(node,0,sizeof(*node));
  node->parent=parent;
  node->id=id;
  node->level=level;
  return(node);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    A DirectClass image has no colormap to compare pixels against, so its
    quantization error is defined as zero (error struct was just cleared).
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /*
    Three distance terms (red, green, blue) are accumulated per pixel, hence
    the factor of 3 in the normalization area.
  */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /*
            Weight each side of the comparison by its opacity so that
            transparent pixels contribute proportionally less error.
          */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs((double) (alpha*GetPixelRed(p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
% */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) memset(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither=MagickTrue; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. % % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const MagickBooleanType dither) % MagickBooleanType PosterizeImageChannel(Image *image, % const ChannelType channel,const size_t levels, % const MagickBooleanType dither) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither: Set this integer value to something other than zero to dither % the mapped image. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. 
*/ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const MagickBooleanType dither) { MagickBooleanType status; status=PosterizeImageChannel(image,DefaultChannels,levels,dither); return(status); } MagickExport MagickBooleanType PosterizeImageChannel(Image *image, const ChannelType channel,const size_t levels,const MagickBooleanType dither) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \ MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=PosterizePixel(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=PosterizePixel(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=PosterizePixel(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity); } /* Posterize image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,PosterizePixel(GetPixelRed(q))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,PosterizePixel(GetPixelGreen(q))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,PosterizePixel(GetPixelBlue(q))); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither=dither; quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   C h i l d                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneChild method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.  Nodes have 8 children without an alpha channel
    and 16 with one (associate_alpha).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.  NOTE(review): GetCubeInfo sets the
    root's parent to itself, so pruning the root folds its statistics back
    into the root -- confirm callers never prune the root.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   L e v e l                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (8 per node, or 16 when an alpha channel is
    associated), then prune this node only if it sits exactly at the
    configured cube depth -- i.e. the bottom level of the tree.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   T o   C u b e   D e p t h                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children first, then prune this node if it lies strictly
    deeper than the configured cube depth (level > depth, versus the
    level == depth test used by PruneLevel).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.
The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->matte == MagickFalse) { if (SetImageGray(image,&image->exception) != MagickFalse) (void) SetGrayscaleImage(image); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither != MagickFalse) && (depth > 2)) depth--; if ((image->matte != MagickFalse) && (depth > 5)) depth--; if (SetImageGray(image,&image->exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,&image->exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. 
*/ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. 
*/ status=QuantizeImage(quantize_info,images); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither != MagickFalse) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(&images->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,&image->exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. 
*/ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % MagickRealType *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset, MagickRealType *quantize_error) { ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. 
It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static int MagickRealTypeCompare(const void *error_p,const void *error_q) { MagickRealType *p, *q; p=(MagickRealType *) error_p; q=(MagickRealType *) error_q; if (*p > *q) return(1); if (fabs((double) (*q-*p)) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { MagickRealType *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (MagickRealType *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType), MagickRealTypeCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(MagickRealType *) RelinquishMagickMemory( quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest color from % a reference image. 
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classification walks the reference image, but any exception is reported
    into the target image's exception record.
  */
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p   I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Assign every image in the sequence its closest color from the
        classified reference colors.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: The image.
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelPacket *color_1, *color_2; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelPacketIntensity(color_1)-PixelPacketIntensity(color_2); if (intensity < (double) INT_MIN) intensity=(double) INT_MIN; if (intensity > (double) INT_MAX) intensity=(double) INT_MAX; return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; ssize_t i; size_t extent; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); exception=(&image->exception); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1)); colormap_index=(ssize_t *) AcquireQuantumMemory(extent, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; 
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(Quantum) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ 
magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; const PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
integral_critical.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <time.h>

/* Integrand: 4 / (1 + x^2).  Its definite integral over [0, 1] is pi. */
double integrand(double x)
{
    return 4 / (1 + x * x);
}

/*
 * Approximate the integral of 4/(1+x^2) over [0, 1] with the composite
 * trapezoidal rule.  Each OpenMP thread accumulates a private partial sum
 * over its share of the subintervals; partial sums are merged exactly once
 * per thread inside an `omp critical` section.
 *
 * argv[1] (optional): number of OpenMP threads to use (default 1).
 * Prints the integral estimate and the wall-clock time, returns 0.
 */
int main(int argc, char* argv[])
{
    double begin, end;
    int t_count = 1;
    if (argc > 1) {
        t_count = (int) strtol(argv[1], NULL, 10);
        if (t_count < 1) {
            t_count = 1;   /* guard against non-numeric / non-positive input */
        }
    }
    omp_set_num_threads(t_count);

    double int_value = 0;
    /* 10^8 subintervals; an integer constant avoids the double round-trip
     * of the original pow(10, 8). */
    const int iters_count = 100000000;

    begin = omp_get_wtime();
    #pragma omp parallel shared(int_value)
    {
        double local_sum = 0;
        /* BUG FIX: the original bound (i < iters_count - 1) skipped the
         * final subinterval [1 - 1/N, 1] even though the sum is divided
         * by N panels below; iterate over all N panels. */
        #pragma omp for
        for (int i = 0; i < iters_count; i++) {
            double left = i * 1.0 / iters_count;
            double right = (i + 1) * 1.0 / iters_count;
            local_sum += (integrand(left) + integrand(right)) / 2;
        }
        /* Merge this thread's partial sum; one critical entry per thread. */
        #pragma omp critical
        {
            int_value += local_sum;
        }
    }
    end = omp_get_wtime();

    printf("The value of the integral is %f\n", int_value / iters_count);
    printf("time: %f\n", end - begin);
    return 0;
}
GB_unop__identity_uint64_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_uint16)
// op(A') function:  GB (_unop_tran__identity_uint64_uint16)

// C type:   uint64_t
// A type:   uint16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// NOTE: these macros are consumed textually by GB_unop_transpose.c below;
// their exact spelling is part of that template's contract.

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input)
#define GB_OP(z, x) \
    z = x ;

// casting (uint16_t -> uint64_t, a widening value-preserving cast)
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_uint16)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose template expands using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mlpcell_bf16.h
#ifndef MLPCELL_BF16 #define MLPCELL_BF16 #include "mc_funcs.h" #define PCL_ASSERT(cond, x...) do { if(!(cond)) { printf(x); fflush(stdout); exit(1); } } while(0) #define DECL_VLA_PTR(type, name, dims, ptr) type (*name)dims = (type (*)dims)ptr #define DECL_VLA_PTR_CHECK_VAR(var, type, name, dims, ptr) type (*name)dims = (var > 0) ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_CHECK_COND(cond, type, name, dims, ptr) type (*name)dims = cond ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_CHECK_COND_VAR(cond, var, type, name, dims, ptr) type (*name)dims = (cond && var > 0) ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_PT(type, name, dims, t) type (*name)dims = (type (*)dims)(t.data_ptr<type>()) #define DECL_VLA_PTR_PT_CHECK_COND(cond, type, name, dims, t) type (*name)dims = cond ? (type (*)dims)(t.data_ptr<type>()) : NULL #define DECL_VLA_PTR_NPT(newtype, type, name, dims, t) newtype (*name)dims = (newtype (*)dims)(t.data_ptr<type>()) #define DECL_VLA_PTR_NPT_CHECK_COND(cond, newtype, type, name, dims, t) newtype (*name)dims = cond ? (newtype (*)dims)(t.data_ptr<type>()) : NULL #define LIBXSMM_ALIGNDOWN(N, A) ((N) & ~((A)-1)) //--------------------------------------norm_to_vnni----------------------------------------------------- // void norm_to_vnni_16b(libxsmm_bfloat16* in, libxsmm_bfloat16* out, int N, int M) { libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_type trans_type; trans_param.in.primary = (void*)in; trans_param.out.primary = (void*)out; if ( N % 2 == 1 ) { trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI_PAD; } else { trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI; } libxsmm_meltwfunction_unary trans_kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&M, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, trans_type); if ( trans_kernel == NULL ) { fprintf( stderr, "JIT for NORM_TO_VNNI TPP. 
Bailing...!\n"); exit(-1); } trans_kernel( &trans_param ); } //--------------------------------------norm_to_normT----------------------------------------------------- // void norm_to_normT_16b(libxsmm_bfloat16* in, libxsmm_bfloat16* out, int N, int M) { libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_type trans_type; trans_param.in.primary = (void*)in; trans_param.out.primary = (void*)out; trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT; libxsmm_meltwfunction_unary trans_kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, trans_type); if ( trans_kernel == NULL ) { fprintf( stderr, "JIT for NORM_TO_NORMT TPP. Bailing...!\n"); exit(-1); } trans_kernel( &trans_param ); } //--------------------------------------------------convert f32 to bf16 TPP------------------------------------- inline void cvt_f32_bf16(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_meltwfunction_unary cvt_f32_bf16_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, unary_flags, unary_type ); PCL_ASSERT(cvt_f32_bf16_kernel, "Null cvt_f32_bf16 kernel"); cvt_f32_bf16_kernel(params); } inline void bf16_copy(int N, int M, int LDO, int LDI, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_BF16; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&LDI, (libxsmm_blasint*)&LDO, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT 
for bf16 to b16 copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void colbcast_bf16_copy(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_BF16; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT for bf16 to b16 broadcast copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void bf16_f32_copy(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_F32; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT for bf16 to f32 copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void add_bf16_bf16(int N, int M, libxsmm_meltw_binary_param *binary_param) { libxsmm_meltw_binary_flags binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; libxsmm_meltw_binary_type binary_type = LIBXSMM_MELTW_TYPE_BINARY_ADD; libxsmm_meltwfunction_binary add_kernel = libxsmm_dispatch_meltw_binary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&M, (libxsmm_blasint*)&N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, binary_flags, binary_type); if ( add_kernel == NULL ) { fprintf( stderr, "JIT for BINARY TPP. 
Bailing...!\n"); exit(-1); } add_kernel(binary_param); } inline void relu_fwd_bf16(long N, long M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; libxsmm_meltwfunction_unary relu_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, unary_flags, LIBXSMM_MELTW_TYPE_UNARY_RELU); if ( relu_kernel == NULL ) { fprintf( stderr, "JIT for ReLU TPP. Bailing...!\n"); exit(-1); } relu_kernel( params ); } inline void relu_bwd_bf16(long N, long M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; libxsmm_meltwfunction_unary relu_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, unary_flags, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( relu_kernel == NULL ) { fprintf( stderr, "JIT for ReLU TPP. Bailing...!\n"); exit(-1); } relu_kernel( params ); } inline void dropout_bf16(long N, long M, libxsmm_meltw_unary_param *params, libxsmm_meltw_unary_flags flags) { libxsmm_meltwfunction_unary dropout_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, flags, LIBXSMM_MELTW_TYPE_UNARY_DROPOUT); if ( dropout_kernel == NULL ) { fprintf( stderr, "JIT for DROPOUT TPP. Bailing...!\n"); exit(-1); } dropout_kernel( params ); } inline void dropout_bwd_bf16(long N, long M, libxsmm_meltw_unary_param *params, libxsmm_meltw_unary_flags flags) { libxsmm_meltwfunction_unary dropout_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, flags, LIBXSMM_MELTW_TYPE_UNARY_DROPOUT_INV); if ( dropout_kernel == NULL ) { fprintf( stderr, "JIT for DROPOUT TPP. 
Bailing...!\n"); exit(-1); } dropout_kernel( params ); } inline void brgemm_bf16_f32(long n, long m, long k, long stride_b, long stride_a, libxsmm_bfloat16 *B_, libxsmm_bfloat16 *A_, float *C, long count, const float beta = 1.0, const char b_trans='n', const char a_trans='n', const char b_vnni='n', const char a_vnni='n') { const float alpha = 1.0; libxsmm_bfloat16 *A = A_; libxsmm_bfloat16 *B = B_; unsigned long long l_br = count; int flags = LIBXSMM_GEMM_VNNI_FLAGS('n', 'n', 'v', 'n'); // Query or JIT-generate reduction kernel; returns NULL if JIT is not supported (bf16 inputs, fp32-accumulate internally, bf16 outputs). * libxsmm_bsmmfunction_reducebatch_strd kernel = libxsmm_bsmmdispatch_reducebatch_strd(m, n, k, stride_a*sizeof(libxsmm_bfloat16), stride_b*sizeof(libxsmm_bfloat16), NULL, NULL, NULL, &alpha, &beta, &flags, NULL); PCL_ASSERT(kernel, "Null brgemm bf16 kernel\n"); kernel(A, B, C, &l_br); } inline void brgemm_bf16_bf16(long n, long m, long k, long stride_b, long stride_a, libxsmm_bfloat16 *B_, libxsmm_bfloat16 *A_, libxsmm_bfloat16 *C, long count, const float beta = 1.0, const char b_trans='n', const char a_trans='n', const char b_vnni='n', const char a_vnni='n') { const float alpha = 1.0; libxsmm_bfloat16 *A = A_; libxsmm_bfloat16 *B = B_; unsigned long long l_br = count; int flags = LIBXSMM_GEMM_VNNI_FLAGS('n', 'n', 'v', 'n'); // Query or JIT-generate reduction kernel; returns NULL if JIT is not supported (bf16 inputs, fp32-accumulate internally, bf16 outputs). 
* libxsmm_bmmfunction_reducebatch_strd kernel = libxsmm_bmmdispatch_reducebatch_strd(m, n, k, stride_a*sizeof(libxsmm_bfloat16), stride_b*sizeof(libxsmm_bfloat16), NULL, NULL, NULL, &alpha, &beta, &flags, NULL); PCL_ASSERT(kernel, "Null brgemm bf16 kernel\n"); kernel(A, B, C, &l_br); } inline void delbias_bf16_f32(int N, int M, int ldo, int ldi, libxsmm_meltw_unary_param *delbias_params) { libxsmm_meltwfunction_unary delbias_kernel = libxsmm_dispatch_meltw_unary(M, N, &ldi, &ldo, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT); if (delbias_kernel == NULL ) { printf("Could not create bf16 delbias kernel.. bailing\n"); exit(-1); } delbias_kernel(delbias_params); } class MLPCell_BF16 { public: MLPCell_BF16(int N, int C, int K, int bn, int bc, int bk, bool bias, bool skip, int act, bool norm, float p, bool train) { pN = N; pC = C; pK = K; pbn = bn; pbc = bc; pbk = bk; pbias = bias; pskip = skip; pact = act; pnorm = norm; pp = p; ptrain = train; //printf("MLPCell: N = %d, C = %d, K = %d, bf = %d, bias = %d, skip = %d, act = %d, norm = %d, dropout prob = %.2f train = %d\n", N, C, K, bias, skip, act, norm, p, train); } std::vector<at::Tensor> fwd(std::vector<at::Tensor> inputs) { long bn = pbn; long bc = pbc; long bk = pbk; long nn = pN/bn; long nc = pC; long nk = pK; long rn = pN % bn; long in_off = nn*nc*bn*bc; long out_off = nn*nk*bn*bk; long C = nc*bc; long K = nk*bk; // std::cout << "BF16--------------> " << std::endl; long bcp = (bc % 2 != 0) ? 
(bc + 1): bc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param cvt_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; libxsmm_meltw_binary_param add_params; libxsmm_meltw_binary_flags binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; libxsmm_meltw_binary_type binary_type = LIBXSMM_MELTW_TYPE_BINARY_ADD; int i=0; at::Tensor t_input_l = inputs[i++]; at::Tensor t_input_r = inputs[i++]; at::Tensor t_weights_l = inputs[i++]; at::Tensor t_weights_r = inputs[i++]; at::Tensor t_bias_l = inputs[i++]; at::Tensor t_bias_r = inputs[i++]; at::Tensor t_output = t_input_l.new_empty({pN, K}); int dd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; at::Tensor t_dropout_mask_bN, t_dropout_mask_rN; if(ptrain && pp > 0) { int size = nn*nk*bn*bk; t_dropout_mask_bN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); if(rn > 0) { size = nk*rn*bk; t_dropout_mask_rN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); } } __mmask32 (*dropout_mask_bN)[nk][bn][dd] = (ptrain && pp > 0) ? (__mmask32 (*)[nk][bn][dd])(t_dropout_mask_bN.data_ptr()) : NULL; __mmask32 (*dropout_mask_rN)[nk][rn][dd] = (ptrain && pp > 0 && rn > 0) ? (__mmask32 (*)[nk][rn][dd])(t_dropout_mask_rN.data_ptr()) : NULL; int rd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; at::Tensor t_relumask_bN, t_relumask_rN; if(pact==1) { int size = nn*nk*bn*bk; t_relumask_bN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); if(rn > 0) { size = nk*rn*bk; t_relumask_rN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); } } __mmask32 (*relumask_bN)[nk][bn][rd] = pact==1 ? (__mmask32 (*)[nk][bn][rd])(t_relumask_bN.data_ptr()) : NULL; __mmask32 (*relumask_rN)[nk][rn][rd] = (pact==1 && rn > 0) ? 
(__mmask32 (*)[nk][rn][rd])(t_relumask_rN.data_ptr()) : NULL; int threads = 1; #ifdef _OPENMP threads = omp_get_max_threads(); #endif long wts = nk*nc*bk*bcp; long in_bn = threads*nc*bn*bc; long in_rn = nc*rn*bc; long out_bn = threads*nk*bn*bk; long out_rn = nk*rn*bk; long scratch_size; if(pskip) scratch_size = (wts*6 + in_bn*2 + in_rn*2 + out_bn*3 + out_rn*3 + K*2)*sizeof(libxsmm_bfloat16); else scratch_size = (wts*3 + in_bn + in_rn + out_bn + out_rn + K)*sizeof(libxsmm_bfloat16); void *scratch = libxsmm_aligned_malloc(scratch_size, 2097152); libxsmm_bfloat16 *t_bf16_weights_l = (libxsmm_bfloat16*)scratch; libxsmm_bfloat16 *t_tr_weights_l = t_bf16_weights_l + wts; libxsmm_bfloat16 *t_vnni_weights_l = t_tr_weights_l + wts; libxsmm_bfloat16 *t_input_bN_l = t_vnni_weights_l + wts; libxsmm_bfloat16 *t_output_bN_l = t_input_bN_l + in_bn; libxsmm_bfloat16 *t_output_bN = t_output_bN_l + out_bn; libxsmm_bfloat16 *t_bf16_bias_l = t_output_bN + out_bn; libxsmm_bfloat16 *t_input_rN_l=NULL, *t_output_rN_l=NULL, *t_output_rN=NULL; if(rn > 0) { t_input_rN_l = t_bf16_bias_l + K; t_output_rN_l = t_input_rN_l + in_rn; t_output_rN = t_output_rN_l + out_rn; } libxsmm_bfloat16 *t_bf16_weights_r=NULL, *t_tr_weights_r=NULL, *t_vnni_weights_r=NULL, *t_input_bN_r=NULL, *t_output_bN_r=NULL, *t_bf16_bias_r=NULL; libxsmm_bfloat16 *t_input_rN_r=NULL, *t_output_rN_r=NULL; if(pskip) { if(rn > 0) t_bf16_weights_r = t_output_rN + out_rn; else t_bf16_weights_r = t_bf16_bias_l + K; t_tr_weights_r = t_bf16_weights_r + wts; t_vnni_weights_r = t_tr_weights_r + wts; t_input_bN_r = t_vnni_weights_r + wts; t_output_bN_r = t_input_bN_r + in_bn; t_bf16_bias_r = t_output_bN_r + out_bn; if(rn > 0) { t_input_rN_r = t_bf16_bias_r + K; t_output_rN_r = t_input_rN_r + in_rn; } } DECL_VLA_PTR_PT(float, wt_f32_l, [C], t_weights_l); float *bias_f32_l = t_bias_l.data_ptr<float>(); float (*wt_f32_r)[C] = pskip ? (float (*)[C])t_weights_r.data_ptr<float>() : NULL; float *bias_f32_r = pskip ? 
t_bias_r.data_ptr<float>() : NULL; DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, input_l, [C], t_input_l); DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, input_r, [C], t_input_r); DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, output, [K], t_output); DECL_VLA_PTR(libxsmm_bfloat16, wt_l, [nc][bk][bc], t_bf16_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, tr_wt_l, [nc][bcp][bk], t_tr_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, vnni_wt_l, [nc][bcp/2][bk][2], t_vnni_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, input_bN_l, [nc][bn][bc], t_input_bN_l); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_rN_l, [nc][rn][bc], t_input_rN_l); DECL_VLA_PTR(libxsmm_bfloat16, output_bN, [nk][bn][bk], t_output_bN); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, output_rN, [nk][rn][bk], t_output_rN); DECL_VLA_PTR(libxsmm_bfloat16, bias_l, [bk], t_bf16_bias_l); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, wt_r, [nc][bk][bc], t_bf16_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, tr_wt_r, [nc][bcp][bk], t_tr_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, vnni_wt_r, [nc][bcp/2][bk][2], t_vnni_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_bN_r, [nc][bn][bc], t_input_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_rN_r, [nc][rn][bc], t_input_rN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, output_bN_l, [nk][bn][bk], t_output_bN_l); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, output_bN_r, [nk][bn][bk], t_output_bN_r); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, output_rN_l, [nk][rn][bk], t_output_rN_l); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, output_rN_r, [nk][rn][bk], t_output_rN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, bias_r, [bk], t_bf16_bias_r); // Get BF16 copy of weights for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_l[k*bk][c*bc]; cvt_params.out.primary = &wt_l[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } 
if(pskip) { for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_r[k*bk][c*bc]; cvt_params.out.primary = &wt_r[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } } for(int k=0; k<nk; k++) { cvt_params.in.primary = bias_f32_l; cvt_params.out.primary = &bias_l[k]; cvt_f32_bf16(nk, bk, &cvt_params); } if(pskip) { for(int k=0; k<nk; k++) { cvt_params.in.primary = bias_f32_r; cvt_params.out.primary = &bias_r[k]; cvt_f32_bf16(nk, bk, &cvt_params); } } // Wt: NORM layout to VNNI norm_to_normT_16b(wt_l[0][0][0], tr_wt_l[0][0][0], bk, bcp); norm_to_vnni_16b(tr_wt_l[0][0][0], vnni_wt_l[0][0][0][0], bcp, bk); if(pskip) { norm_to_normT_16b(wt_r[0][0][0], tr_wt_r[0][0][0], bk, bcp); norm_to_vnni_16b(tr_wt_r[0][0][0], vnni_wt_r[0][0][0][0], bcp, bk); } #ifdef _OPENMP #pragma omp parallel #endif { int tid = omp_get_thread_num(); int threads = omp_get_max_threads(); int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1; int tb = (tid*jobs < nn) ? tid*jobs : nn; int te = ((tid+1)*jobs < nn) ? 
(tid+1)*jobs : nn; int count = nc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_binary_param add_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; for(int m=tb; m<te; m++) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[m*bn][c*bc]; copy_params.out.primary = &input_bN_l[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[m*bn][c*bc]; copy_params.out.primary = &input_bN_r[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_l[k][0]; copy_params.out.primary = &output_bN_l[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_r[k][0]; copy_params.out.primary = &output_bN_r[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } } else { for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_l[k][0]; copy_params.out.primary = &output_bN[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } } if(pskip) { brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_l[tid][0][0], vnni_wt_l[0][0][0][0], output_bN_l[tid][0][0], count); brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_r[tid][0][0], vnni_wt_r[0][0][0][0], output_bN_r[tid][0][0], count); add_params.in0.primary = (void*)&output_bN_l[tid][0]; add_params.in1.primary = (void*)&output_bN_r[tid][0]; add_params.out.primary = (void*)&output_bN[tid][0]; add_bf16_bf16(bn, bk, &add_params); } else brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_l[tid][0][0], vnni_wt_l[0][0][0][0], output_bN[tid][0][0], count); if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &output_bN[tid][k]; relu_params.out.primary = &output_bN[tid][k]; relu_params.out.secondary = &relumask_bN[m][k]; relu_fwd_bf16(bn, bk, &relu_params); } } if(ptrain && pp > 0) { for(int k=0; k<nk; k++) { 
dropout_params.in.primary = &output_bN[tid][k]; dropout_params.in.tertiary = &pp; dropout_params.in.secondary = rnd_state; dropout_params.out.primary = &output_bN[tid][k]; dropout_params.out.secondary = &dropout_mask_bN[m][k]; dropout_bf16(bn, bk, &dropout_params, dropout_flags); } } for(int k=0; k<nk; k++) { copy_params.in.primary = &output_bN[tid][k]; copy_params.out.primary = &output[m*bn][k*bk]; bf16_copy(bn, bk, nk*bk, nk*bk, &copy_params); } } } if(rn > 0) { // Single-threaded part of compute // for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[nn*bn][c*bc]; copy_params.out.primary = &input_rN_l[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[nn*bn][c*bc]; copy_params.out.primary = &input_rN_r[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = bias_l; copy_params.out.primary = &output_rN_l[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); copy_params.in.primary = bias_r; copy_params.out.primary = &output_rN_r[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); } } else { for(int k=0; k<nk; k++) { copy_params.in.primary = bias_l; copy_params.out.primary = &output_rN[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); } } int count = nc; if(pskip) { brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_l[0][0][0], vnni_wt_l[0][0][0][0], output_rN_l[0][0][0], count); brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_r[0][0][0], vnni_wt_r[0][0][0][0], output_rN_r[0][0][0], count); add_params.in0.primary = (void*)&output_rN_l[0][0]; add_params.in1.primary = (void*)&output_rN_r[0][0]; add_params.out.primary = (void*)&output_rN[0][0]; add_bf16_bf16(rn, bk, &add_params); } else brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_l[0][0][0], vnni_wt_l[0][0][0][0], output_rN[0][0][0], count); if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &output_rN[0][k]; relu_params.out.primary = &output_rN[0][k]; 
relu_params.out.secondary = &relumask_rN[0][k]; relu_fwd_bf16(rn, bk, &relu_params); } } if(ptrain && pp > 0) { for(int k=0; k<nk; k++) { dropout_params.in.primary = &output_rN[0][k]; dropout_params.in.secondary = rnd_state; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &output_rN[0][k]; dropout_params.out.secondary = &dropout_mask_rN[0][k]; dropout_bf16(rn, bk, &dropout_params, dropout_flags); } } for(int k=0; k<nk; k++) { copy_params.in.primary = &output_rN[0][k]; copy_params.out.primary = &output[nn*bn][k*bk]; bf16_copy(rn, bk, nk*bk, nk*bk, &copy_params); } } libxsmm_free((void*)scratch); return {t_output, t_relumask_bN, t_relumask_rN, t_dropout_mask_bN, t_dropout_mask_rN}; } ////===================================================================================================================================================== //// ====================== BackPass Function =========================== ////===================================================================================================================================================== std::vector<at::Tensor> bwd(std::vector<at::Tensor> inputs) { long bn = pbn; long bc = pbc; long bk = pbk; long nn = pN/bn; long nc = pC; long nk = pK; long rn = pN % bn; long K = nk*bk; long C = nc*bc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_param delbias_params; libxsmm_meltw_unary_param cvt_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; int threads = 1; #ifdef _OPENMP threads = omp_get_max_threads(); #endif // ---------------- zero Padding to handle brgemm reduction ------------- long bnp = (bn % 2 != 0) ? (bn + 1): bn; long rnp = (rn % 2 != 0) ? (rn + 1): rn; long bkp = (bk % 2 != 0) ? 
(bk + 1): bk; // ---------------------------------------------------------------------- int i=0; at::Tensor t_grad_output = inputs[i++]; at::Tensor t_input_l = inputs[i++]; at::Tensor t_input_r = inputs[i++]; at::Tensor t_weights_l = inputs[i++]; at::Tensor t_weights_r = inputs[i++]; at::Tensor t_relumask_bN = inputs[i++]; at::Tensor t_relumask_rN = inputs[i++]; at::Tensor t_dropout_mask_bN = inputs[i++]; at::Tensor t_dropout_mask_rN = inputs[i++]; at::Tensor t_grad_weights_l = t_weights_l.new_empty({nk, nc, bk, bc}); at::Tensor t_grad_bias_l = t_weights_l.new_empty(K); at::Tensor t_grad_input_l = t_input_l.new_empty({pN, C}); at::Tensor t_grad_weights_r, t_grad_bias_r, t_grad_input_r; if(pskip) { t_grad_weights_r = t_weights_r.new_empty({nk, nc, bk, bc}); t_grad_bias_r = t_weights_r.new_empty(K); t_grad_input_r = t_input_r.new_empty({pN, C}); } long wts = nk*nc*bkp*bc; long go_bn_k = threads*nk*bn*bkp; long go_rn_k = nk*rn*bkp; long go_bn_n = threads*nk*bnp*bk; long go_rn_n = nk*rnp*bk; long gi_bn = threads*nc*bn*bc; long gi_rn = nc*rn*bc; long tr_go_bn = threads*nk*bnp*bk; long tr_go_rn = nk*rnp*bk; long in_v_bn = threads*nc*bnp*bc; long in_v_rn = nc*rnp*bc; long in_bn = threads*nc*bn*bc; long in_rn = nc*rn*bc; long scratch_size; if(pskip) scratch_size = (wts*4 + go_bn_k + go_rn_k + go_bn_n + go_rn_n + gi_bn*2 + gi_rn*2 + tr_go_bn + tr_go_rn + in_v_bn*2 + in_v_rn*2 + in_bn*2 + in_rn*2)*sizeof(libxsmm_bfloat16) + (nk*nc*bk*bc*2)*sizeof(float); else scratch_size = (wts*2 + go_bn_k + go_rn_k + go_bn_n + go_rn_n + gi_bn + gi_rn + tr_go_bn + tr_go_rn + in_v_bn + in_v_rn + in_bn + in_rn)*sizeof(libxsmm_bfloat16) + (nk*nc*bk*bc)*sizeof(float); void *scratch = libxsmm_aligned_malloc(scratch_size, 2097152); libxsmm_bfloat16* t_grad_output_bN_K = (libxsmm_bfloat16*)scratch; libxsmm_bfloat16* t_grad_output_bN_N = t_grad_output_bN_K + go_bn_k; libxsmm_bfloat16* t_tr_grad_output_bN = t_grad_output_bN_N + go_bn_n; libxsmm_bfloat16* t_input_vnni_bN_l = t_tr_grad_output_bN + 
tr_go_bn; libxsmm_bfloat16* t_grad_input_bN_l = t_input_vnni_bN_l + in_v_bn; libxsmm_bfloat16* t_input_bN_l = t_grad_input_bN_l + gi_bn; libxsmm_bfloat16* t_vnni_weights_l = t_input_bN_l + in_bn; libxsmm_bfloat16* t_bf16_weights_l = t_vnni_weights_l + wts; float* t_f32_grad_wt_l = (float*)(t_bf16_weights_l + wts); libxsmm_bfloat16 *t_grad_output_rN_K=NULL, *t_grad_output_rN_N=NULL, *t_tr_grad_output_rN=NULL, *t_input_vnni_rN_l=NULL, *t_grad_input_rN_l=NULL; libxsmm_bfloat16 *t_input_rN_l=NULL; if(rn > 0) { t_grad_output_rN_K = (libxsmm_bfloat16*)(t_f32_grad_wt_l + wts); t_grad_output_rN_N = t_grad_output_rN_K + go_rn_k; t_tr_grad_output_rN = t_grad_output_rN_N + go_rn_n; t_input_vnni_rN_l = t_tr_grad_output_rN + tr_go_rn; t_grad_input_rN_l = t_input_vnni_rN_l + in_v_rn; t_input_rN_l = t_grad_input_rN_l + gi_rn; } libxsmm_bfloat16* t_input_vnni_bN_r=NULL, *t_grad_input_bN_r=NULL, *t_input_bN_r=NULL; libxsmm_bfloat16* t_vnni_weights_r=NULL, *t_bf16_weights_r=NULL, *t_input_vnni_rN_r=NULL, *t_grad_input_rN_r=NULL, *t_input_rN_r=NULL; float *t_f32_grad_wt_r=NULL; if(pskip) { if(rn > 0) t_input_vnni_bN_r = t_input_rN_l + in_rn; else t_input_vnni_bN_r = (libxsmm_bfloat16*)(t_f32_grad_wt_l + wts); t_grad_input_bN_r = t_input_vnni_bN_r + in_v_bn; t_input_bN_r = t_grad_input_bN_r + gi_bn; t_vnni_weights_r = t_input_bN_r + in_bn; t_bf16_weights_r = t_vnni_weights_r + wts; t_f32_grad_wt_r = (float*)(t_bf16_weights_r + wts); if(rn > 0) { t_input_vnni_rN_r = (libxsmm_bfloat16*)(t_f32_grad_wt_r + wts); t_grad_input_rN_r = t_input_vnni_rN_r + in_v_rn; t_input_rN_r = t_grad_input_rN_r + gi_rn; } } DECL_VLA_PTR_PT(float, wt_f32_l, [C], t_weights_l); DECL_VLA_PTR_PT(float, grad_wt_l, [C], t_grad_weights_l); float (*wt_f32_r)[C] = pskip ? (float (*)[C])t_weights_r.data_ptr<float>() : NULL; float (*grad_wt_r)[C] = pskip ? 
(float (*)[C])t_grad_weights_r.data_ptr<float>() : NULL; DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, grad_output, [K], t_grad_output); DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, input_l, [C], t_input_l); DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, grad_input_l, [C], t_grad_input_l); DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, input_r, [C], t_input_r); DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, grad_input_r, [C], t_grad_input_r); DECL_VLA_PTR(libxsmm_bfloat16, grad_output_bN_K, [nk][bn][bkp], t_grad_output_bN_K); DECL_VLA_PTR(libxsmm_bfloat16, grad_output_bN_N, [nk][bnp][bk], t_grad_output_bN_N); DECL_VLA_PTR(libxsmm_bfloat16, tr_grad_output_bN, [nk][bk][bnp], t_tr_grad_output_bN); DECL_VLA_PTR(libxsmm_bfloat16, input_vnni_bN_l, [nc][bnp/2][bc][2], t_input_vnni_bN_l); DECL_VLA_PTR(libxsmm_bfloat16, grad_input_bN_l, [nc][bn][bc], t_grad_input_bN_l); DECL_VLA_PTR(libxsmm_bfloat16, input_bN_l, [nc][bn][bc], t_input_bN_l); DECL_VLA_PTR(libxsmm_bfloat16, vnni_wt_l, [nc][bkp/2][bc][2], t_vnni_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, wt_l, [nc][bk][bc], t_bf16_weights_l); DECL_VLA_PTR(float, grad_wt_f32_l, [nc][bk][bc], t_f32_grad_wt_l); float *grad_bias_l = t_grad_bias_l.data_ptr<float>(); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_vnni_bN_r, [nc][bnp/2][bc][2], t_input_vnni_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, grad_input_bN_r, [nc][bn][bc], t_grad_input_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_bN_r, [nc][bn][bc], t_input_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, vnni_wt_r, [nc][bkp/2][bc][2], t_vnni_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, wt_r, [nc][bk][bc], t_bf16_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, float, grad_wt_f32_r, [nc][bk][bc], t_f32_grad_wt_r); float *grad_bias_r = pskip ? 
t_grad_bias_r.data_ptr<float>() : NULL; DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_output_rN_K, [nk][rn][bkp], t_grad_output_rN_K); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_output_rN_N, [nk][rnp][bk], t_grad_output_rN_N); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, tr_grad_output_rN, [nk][bk][rnp], t_tr_grad_output_rN); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_vnni_rN_l, [nc][rnp/2][bc][2], t_input_vnni_rN_l); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_input_rN_l, [nc][rn][bc], t_grad_input_rN_l); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_rN_l, [nc][rn][bc], t_input_rN_l); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, input_vnni_rN_r, [nc][rnp/2][bc][2], t_input_vnni_rN_r); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, grad_input_rN_r, [nc][rn][bc], t_grad_input_rN_r); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, input_rN_r, [nc][rn][bc], t_input_rN_r); int dd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; int rd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; __mmask32 (*dropout_mask_bN)[nk][bn][dd] = (ptrain && pp > 0) ? (__mmask32 (*)[nk][bn][dd])(t_dropout_mask_bN.data_ptr()) : NULL; __mmask32 (*dropout_mask_rN)[nk][rn][dd] = (ptrain && pp > 0 && rn > 0) ? (__mmask32 (*)[nk][rn][dd])(t_dropout_mask_rN.data_ptr()) : NULL; __mmask32 (*relumask_bN)[nk][bn][rd] = pact==1 ? (__mmask32 (*)[nk][bn][rd])(t_relumask_bN.data_ptr()) : NULL; __mmask32 (*relumask_rN)[nk][rn][rd] = (pact==1 && rn > 0) ? 
(__mmask32 (*)[nk][rn][rd])(t_relumask_rN.data_ptr()) : NULL; copy_params.out.primary = t_f32_grad_wt_l; zero(K*C, &copy_params); copy_params.out.primary = t_grad_weights_l.data_ptr<float>(); zero(K*C, &copy_params); copy_params.out.primary = t_grad_bias_l.data_ptr<float>(); zero(K, &copy_params); if(pskip) { copy_params.out.primary = t_f32_grad_wt_r; zero(K*C, &copy_params); } if(pskip) { copy_params.out.primary = t_grad_weights_r.data_ptr<float>(); zero(K*C, &copy_params); copy_params.out.primary = t_grad_bias_r.data_ptr<float>(); zero(K, &copy_params); } // Get BF16 copy of weights for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_l[k*bk][c*bc]; cvt_params.out.primary = &wt_l[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } int count = nk; norm_to_vnni_16b(wt_l[0][0][0], vnni_wt_l[0][0][0][0], bkp, bc); //bk x bc --> bkp/2 x bc x 2 if(pskip) { for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_r[k*bk][c*bc]; cvt_params.out.primary = &wt_r[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } int count = nk; norm_to_vnni_16b(wt_r[0][0][0], vnni_wt_r[0][0][0][0], bkp, bc); //bk x bc --> bkp/2 x bc x 2 } if(pskip) { #ifdef _OPENMP #pragma omp parallel reduction(+: grad_wt_f32_l[:nk][:nc][:bk][:bc], grad_bias_l[:K], grad_wt_f32_r[:nk][:nc][:bk][:bc], grad_bias_r[:K]) #endif { int tid = omp_get_thread_num(); int threads = omp_get_max_threads(); int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1; int tb = (tid*jobs < nn) ? tid*jobs : nn; int te = ((tid+1)*jobs < nn) ? 
(tid+1)*jobs : nn; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; for(int m=tb; m<te; m++) { for(int k=0; k<nk; k++) { if(ptrain && pp > 0) { dropout_params.in.primary = &grad_output[m*bn][k*bk]; dropout_params.in.secondary = &dropout_mask_bN[m][k][0][0]; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &grad_output[m*bn][k*bk]; dropout_bwd_bf16(bn, bk, &dropout_params, dropout_flags); } if(pact == 1) { relu_params.in.primary = &grad_output[m*bn][k*bk]; relu_params.in.secondary = &relumask_bN[m][k][0][0]; relu_params.out.primary = &grad_output[m*bn][k*bk]; relu_bwd_bf16(bn, bk, &relu_params); } copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_K[tid][k]; bf16_copy(bn, bk, nk*bkp, nk*bk, &copy_params); } brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_l[0][0][0][0], grad_input_bN_l[tid][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_bN_l[tid][c]; copy_params.out.primary = &grad_input_l[m*bn][c*bc]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_r[0][0][0][0], grad_input_bN_r[tid][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_bN_r[tid][c]; copy_params.out.primary = &grad_input_r[m*bn][c*bc]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_N[tid][k]; bf16_copy(bnp, bk, nk*bk, nk*bk, &copy_params); norm_to_normT_16b(grad_output_bN_N[tid][k][0], tr_grad_output_bN[tid][k][0], bnp, bk); } for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[m*bn][c*bc]; copy_params.out.primary = &input_bN_l[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); norm_to_vnni_16b(input_bN_l[tid][c][0], 
input_vnni_bN_l[tid][c][0][0], bnp, bc); } count = 1; brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_l[tid][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[m*bn][c*bc]; copy_params.out.primary = &input_bN_r[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); norm_to_vnni_16b(input_bN_r[tid][c][0], input_vnni_bN_r[tid][c][0][0], bnp, bc); } count = 1; brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_r[tid][0][0][0], grad_wt_f32_r[0][0][0], count, 1.0); for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_bN_N[tid][k]; delbias_params.out.primary = grad_bias_l; delbias_bf16_f32(bn, bk, bn, bk, &delbias_params); } copy_params.in.primary = grad_bias_l; copy_params.out.primary = grad_bias_r; f32_copy(1, K, K, K, &copy_params); } } } else { #ifdef _OPENMP #pragma omp parallel reduction(+: grad_wt_f32_l[:nk][:nc][:bk][:bc], grad_bias_l[:K]) #endif { int tid = omp_get_thread_num(); int threads = omp_get_max_threads(); int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1; int tb = (tid*jobs < nn) ? tid*jobs : nn; int te = ((tid+1)*jobs < nn) ? 
(tid+1)*jobs : nn; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; for(int m=tb; m<te; m++) { for(int k=0; k<nk; k++) { if(ptrain && pp > 0) { dropout_params.in.primary = &grad_output[m*bn][k*bk]; dropout_params.in.secondary = &dropout_mask_bN[m][k][0][0]; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &grad_output[m*bn][k*bk]; dropout_bwd_bf16(bn, bk, &dropout_params, dropout_flags); } if(pact == 1) { relu_params.in.primary = &grad_output[m*bn][k*bk]; relu_params.in.secondary = &relumask_bN[m][k][0][0]; relu_params.out.primary = &grad_output[m*bn][k*bk]; relu_bwd_bf16(bn, bk, &relu_params); } copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_K[tid][k]; bf16_copy(bn, bk, nk*bkp, nk*bk, &copy_params); } brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_l[0][0][0][0], grad_input_bN_l[tid][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_bN_l[tid][c]; copy_params.out.primary = &grad_input_l[m*bn][c*bc]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_N[tid][k]; bf16_copy(bnp, bk, nk*bk, nk*bk, &copy_params); norm_to_normT_16b(grad_output_bN_N[tid][k][0], tr_grad_output_bN[tid][k][0], bnp, bk); } for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[m*bn][c*bc]; copy_params.out.primary = &input_bN_l[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); norm_to_vnni_16b(input_bN_l[tid][c][0], input_vnni_bN_l[tid][c][0][0], bnp, bc); } count = 1; brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_l[tid][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0); for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_bN_N[tid][k]; delbias_params.out.primary = grad_bias_l; 
delbias_bf16_f32(bn, bk, bn, bk, &delbias_params); } } } } if(rn > 0) { //Single-thread portion of code-------------------------- // Dropout if(ptrain && pp > 0) { for(int k=0; k<nk; k++) { dropout_params.in.primary = &grad_output[nn*bn][k*bk]; dropout_params.in.secondary = &dropout_mask_rN[0][k][0][0]; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &grad_output[nn*bn][k*bk]; dropout_bwd_bf16(rn, bk, &dropout_params, dropout_flags); } } // ReLU if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &grad_output[nn*bn][k*bk]; relu_params.in.secondary = &relumask_rN[0][k][0][0]; relu_params.out.primary = &grad_output[nn*bn][k*bk]; relu_bwd_bf16(rn, bk, &relu_params); } } //grad-input for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[nn*bn][k*bk]; copy_params.out.primary = &grad_output_rN_K[0][k]; bf16_copy(rn, bk, nk*bkp, nk*bk, &copy_params); } brgemm_bf16_bf16(rn, bc, bkp, rn*bkp, 0, grad_output_rN_K[0][0][0], vnni_wt_l[0][0][0][0], grad_input_rN_l[0][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_rN_l[0][c]; copy_params.out.primary = &grad_input_l[nn*bn][c*bc]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { brgemm_bf16_bf16(rn, bc, bkp, rn*bkp, 0, grad_output_rN_K[0][0][0], vnni_wt_r[0][0][0][0], grad_input_rN_r[0][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_rN_r[0][c]; copy_params.out.primary = &grad_input_r[nn*bn][c*bc]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } } //grad-weights for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[nn*bn][k*bk]; copy_params.out.primary = &grad_output_rN_N[0][k]; bf16_copy(rn, bk, nk*bk, nk*bk, &copy_params); norm_to_normT_16b(grad_output_rN_N[0][k][0], tr_grad_output_rN[0][k][0], rnp, bk); } for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[nn*bn][c*bc]; copy_params.out.primary = &input_rN_l[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } for(int c=0; c<nc; c++) 
norm_to_vnni_16b(input_rN_l[0][c][0], input_vnni_rN_l[0][c][0][0], rnp, bc); count = 1; brgemm_bf16_f32(bk, bc, rnp, rnp*bk, rnp*bc, tr_grad_output_rN[0][0][0], input_vnni_rN_l[0][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0); if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[nn*bn][c*bc]; copy_params.out.primary = &input_rN_r[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } for(int c=0; c<nc; c++) norm_to_vnni_16b(input_rN_r[0][c][0], input_vnni_rN_r[0][c][0][0], rnp, bc); count = 1; brgemm_bf16_f32(bk, bc, rnp, rnp*bk, rnp*bc, tr_grad_output_rN[0][0][0], input_vnni_rN_r[0][0][0][0], grad_wt_f32_r[0][0][0], count, 1.0); } for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_rN_N[0][k]; delbias_params.out.primary = grad_bias_l; delbias_bf16_f32(rn, bk, rn, bk, &delbias_params); } if(pskip) { for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_rN_N[0][k]; delbias_params.out.primary = grad_bias_r; delbias_bf16_f32(rn, bk, rn, bk, &delbias_params); } } } for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_wt_f32_l[k][c]; copy_params.out.primary = &grad_wt_l[k*bk][c*bc]; f32_copy(bk, bc, nc*bc, nc*bc, &copy_params); } } if(pskip) { for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_wt_f32_r[k][c]; copy_params.out.primary = &grad_wt_r[k*bk][c*bc]; f32_copy(bk, bc, nc*bc, nc*bc, &copy_params); } } } libxsmm_free(scratch); return {t_grad_input_l, t_grad_input_r, t_grad_weights_l, t_grad_weights_r, t_grad_bias_l, t_grad_bias_r}; } bool has_bias() {return pbias;} bool has_skip() {return pskip;} bool has_norm() {return pnorm;} private: long pN; long pC; long pK; long pbn; long pbc; long pbk; bool pbias; bool pskip; int pact; bool pnorm; float pp; bool ptrain; }; #endif
numint_uniform_grid.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Fast numerical integration on uniform grids.
 * (See also cp2k multigrid algorithm)
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "np_helper/np_helper.h"
#include "gto/grid_ao_drv.h"
#include "vhf/fblas.h"

// rint from C99 math.h; fall back to round-and-truncate when ISO C99
// library extensions are unavailable.
#ifndef __USE_ISOC99
#define rint(x) (int)round(x)
#endif

// Matrix symmetry tags (NOTE(review): presumably shared with other PySCF
// integral drivers — confirm against the callers of this file).
#define PLAIN 0
#define HERMITIAN 1
#define ANTIHERMI 2
#define SYMMETRIC 3
#define OF_CMPLX 2
// Exponent-based screening thresholds: pair-energy cutoff and the clamp
// range that keeps exp() arguments representable in double.
#define EIJCUTOFF 60
#define EXPMAX 700
#define EXPMIN -700
#define MAX_THREADS 256
// NOTE(review): slot index into an env-style parameter array — confirm
// meaning against the libcint conventions used by the callers.
#define PTR_EXPDROP 16
// Squared Euclidean norm of the 3-vector starting at pointer x.
#define SQUARE(x) (*(x) * *(x) + *(x+1) * *(x+1) + *(x+2) * *(x+2))

double CINTsquare_dist(const double *r1, const double *r2);
double CINTcommon_fac_sp(int l);

// _LEN_CART[l] = (l+1)(l+2)/2: number of Cartesian components for
// angular momentum l.
static const int _LEN_CART[] = {
        1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136
};
// _CUM_LEN_CART[l]: cumulative sum of _LEN_CART up to and including l,
// i.e. total number of components for all shells with angular momentum <= l.
static const int _CUM_LEN_CART[] = {
        1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816,
};
// Scratch sizes for the 2D vertical recursion, indexed by li+lj.
// NOTE(review): derivation of these values not visible here — confirm
// against the recursion in _plain_vrr2d before changing.
static int _MAX_RR_SIZE[] = {
        1, 4, 12, 30, 60, 120, 210, 350, 560, 840, 1260, 1800, 2520,
        3465, 4620, 6160, 8008, 10296, 13104, 16380, 20475,
};

/*
 * Address lookup tables for raising one Cartesian power by 1:
 * WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0]
 * WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0]
 * WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0]
 */
static const int _UPIDY[] = {
  1,
  3,  4,
  6,  7,  8,
 10, 11, 12, 13,
 15, 16, 17, 18, 19,
 21, 22, 23, 24, 25, 26,
 28, 29, 30, 31, 32, 33, 34,
 36, 37, 38, 39, 40, 41, 42, 43,
 45, 46, 47, 48, 49, 50, 51, 52, 53,
 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,
105,106,107,108,109,110,111,112,113,114,115,116,117,118,
120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
};
static const int _UPIDZ[] = {
  2,
  4,  5,
  7,  8,  9,
 11, 12, 13, 14,
 16, 17, 18, 19, 20,
 22, 23, 24, 25, 26, 27,
 29, 30, 31, 32, 33, 34, 35,
 37, 38, 39, 40, 41, 42, 43, 44,
 46, 47, 48, 49, 50, 51, 52, 53, 54,
 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,
};
// Address of the component obtained by incrementing the x/y/z power of
// component i by one (x-increment is the identity by construction).
#define WHEREX_IF_L_INC1(i)     i
#define WHEREY_IF_L_INC1(i)     _UPIDY[i]
#define WHEREZ_IF_L_INC1(i)     _UPIDZ[i]
// First component index of shell l whose x/y/z power can be decremented.
#define STARTX_IF_L_DEC1(l)     0
#define STARTY_IF_L_DEC1(l)     (((l)<2)?0:_LEN_CART[(l)-2])
#define STARTZ_IF_L_DEC1(l)     (_LEN_CART[(l)-1]-1)

// Horizontal-recursion step defined elsewhere: (li+lj,0) => (li,lj).
void GTOplain_vrr2d_ket_inc1(double *out, const double *g,
                             double *rirj, int li, int lj);
/* (li+lj,0) => (li,lj) */
// Input g is used as buffer in the iterations.
// Ensure size of g > _MAX_RR_SIZE[li+lj] static void _plain_vrr2d(double *out, double *g, double *gbuf2, int li, int lj, double *ri, double *rj) { const int nmax = li + lj; double *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { GTOplain_vrr2d_ket_inc1(pg01, pg00, rirj, i, j); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00; pg01 += row_01*col_01; } } GTOplain_vrr2d_ket_inc1(out, g01, rirj, li, lj); } /* * rcut is the distance over which the integration (from rcut to infty) is * smaller than the required precision * integral ~= \int_{rcut}^infty r^{l+2} exp(-alpha r^2) dr * * * if l is odd: * integral = \sum_n (l+1)!!/(l+3-2n)!! * rcut^{l+3-2n}/(2 alpha)^n * * exp(-alpha {rcut}^2) * * * elif l is even and rcut > 1: * integral < [\sum_{n<=l/2+1} (l+1)!!/(l+3-2n)!! * rcut^{l+3-2n}/(2 alpha)^n * + 1/(2 alpha)^(l/2+2)] * exp(-alpha {rcut}^2) * * * elif l is even and rcut < 1: * integral < [\sum_{n<=l/2+1} (l+1)!!/(l+3-2n)!! * rcut^{l+3-2n}/(2 alpha)^n] * exp(-alpha {rcut}^2) * + (l+1)!! 
 *              / (2 alpha)^{l/2+1} * \sqrt(pi/alpha)/2
 */
/* Estimate the radius beyond which a Gaussian of exponent alpha, angular
 * momentum l and prefactor c contributes less than exp(log_prec) to the
 * tail integral described above.  The estimate is heuristic (log-domain
 * bounds); returns 0 when even rcut=0 already satisfies the precision. */
static double gto_rcut(double alpha, int l, double c, double log_prec)
{
        double log_c = log(fabs(c));
        double prod = 0;
        double r = 10.;
        double log_2a = log(2*alpha);
        double log_r = log(r);

        if (2*log_r + log_2a > 1) { // r^2 >~ 3/(2a)
                prod = (l+1) * log_r - log_2a;
        } else {
                prod = -(l+4)/2 * log_2a;
        }

        //log_r = .5 * (prod / alpha);
        //if (2*log_r + log_2a > 1) {
        //        prod = (l+1) * log_r - log_2a;
        //} else {
        //        prod = -(l+4)/2 * log_2a;
        //}

        prod += log_c - log_prec;
        if (prod < alpha) {
                // if rcut < 1, estimating based on exp^{-a*rcut^2}
                prod = log_c - log_prec;
        }
        if (prod > 0) {
                r = sqrt(prod / alpha);
        } else {
                r = 0;
        }
        return r;
}

/* Whether grid ranges [0,nx1) and [nx0,nx_per_cell) overlap (with a
 * 3-point safety margin); nx_per_cell is unused in the current test. */
static int _has_overlap(int nx0, int nx1, int nx_per_cell)
{
        return nx0 < nx1 + 3;
}

/* Number of grid points covered along one axis given the image count and
 * the (possibly wrapped) grid slice [nx0, nx1). */
static int _num_grids_on_x(int nimgx, int nx0, int nx1, int nx_per_cell)
{
        int ngridx;
        if (nimgx == 1) {
                ngridx = nx1 - nx0;
        } else if (nimgx == 2 && !_has_overlap(nx0, nx1, nx_per_cell)) {
                ngridx = nx1 - nx0 + nx_per_cell;
        } else {
                ngridx = nx_per_cell;
        }
        return ngridx;
}

/* Tabulate, along one lattice axis, the values x^l * exp(-aij (x-xij)^2)
 * on the uniform grid for l = 0..topl, summed over periodic images when
 * needed.  a/b are the lattice constant and its reciprocal for this axis;
 * xi/xj and ai/aj define the Gaussian product center xij and exponent aij.
 * Fills xs_exp (l1 x nx_per_cell), records the image range in img_slice
 * and the covered grid slice in grid_slice.  Returns the number of grid
 * points covered, or 0 when the product is negligible everywhere. */
static int _orth_components(double *xs_exp, int *img_slice, int *grid_slice,
                            double a, double b, double cutoff,
                            double xi, double xj, double ai, double aj,
                            int periodic, int nx_per_cell, int topl,
                            int offset, int submesh, double *cache)
{
        double aij = ai + aj;
        double xij = (ai * xi + aj * xj) / aij;
        double heights_inv = b;
        double xij_frac = xij * heights_inv;
        double edge0 = xij_frac - cutoff * heights_inv;
        double edge1 = xij_frac + cutoff * heights_inv;

        if (edge0 == edge1) {
// cutoff may be so small that it does not provide difference to edge0 and
// edge1. When edge0 and edge1 are right on the edge of the box (== integer),
// nimg0 may be equal to nimg1 and nimg can be 0. Skip this extreme condition.
                return 0;
        }

        int nimg0 = 0;
        int nimg1 = 1;
// If submesh is not identical to mesh, it means the product of the basis
// functions should be completely inside the unit cell. Only one image needs to
// be considered.
        if (offset != 0 || submesh != nx_per_cell) {
// |i> is the steep function and centered inside image 0. Moving |j> all around
// will not change the center of |ij>. The periodic system can be treated as
// non-periodic system so that only one image needs to be considered.
                nimg0 = (int)floor(xij_frac);
                nimg1 = nimg0 + 1;
                edge0 = MAX(edge0, nimg0);
                edge1 = MIN(edge1, nimg1);
        } else if (periodic) {
                nimg0 = (int)floor(edge0);
                nimg1 = (int)ceil (edge1);
        }
        int nimg = nimg1 - nimg0;
        int nmx0 = nimg0 * nx_per_cell;
        int nmx1 = nimg1 * nx_per_cell;
        int nmx = nmx1 - nmx0;

        int nx0 = (int)floor(edge0 * nx_per_cell);
        int nx1 = (int)ceil (edge1 * nx_per_cell);
        int nx0_edge;
        int nx1_edge;
        // to ensure nx0, nx1 being inside the unit cell
        if (periodic) {
                nx0 = (nx0 - nmx0) % nx_per_cell;
                nx1 = (nx1 - nmx0) % nx_per_cell;
                if (nx1 == 0) {
                        nx1 = nx_per_cell;
                }
        }
// If only 1 image is required, after drawing the grids to the unit cell
// as above, the periodic system can be treated as a non-periodic
// system, which requires [nx0:nx1] being inside submesh. It is
// necessary because xij+/-cutoff may be out of the submesh for periodic
// systems when offset and submesh are specified.
        if (nimg == 1) {
                nx0 = MIN(nx0, offset + submesh);
                nx0 = MAX(nx0, offset);
                nx1 = MIN(nx1, offset + submesh);
                nx1 = MAX(nx1, offset);
                nx0_edge = nx0;
                nx1_edge = nx1;
        } else {
                nx0_edge = 0;
                nx1_edge = nmx;
        }
        img_slice[0] = nimg0;
        img_slice[1] = nimg1;
        grid_slice[0] = nx0;
        grid_slice[1] = nx1;

        int ngridx = _num_grids_on_x(nimg, nx0, nx1, nx_per_cell);
        if (ngridx == 0) {
                return 0;
        }

        int i, m, l;
        double *px0;
        double *gridx = cache;
        double *xs_all = cache + nmx;
        // single image: write the exponentials straight into the output
        if (nimg == 1) {
                xs_all = xs_exp;
        }
        int grid_close_to_xij = rint(xij_frac * nx_per_cell) - nmx0;
        grid_close_to_xij = MIN(grid_close_to_xij, nx1_edge);
        grid_close_to_xij = MAX(grid_close_to_xij, nx0_edge);

        double img0_x = a * nimg0;
        double dx = a / nx_per_cell;
        double base_x = img0_x + dx * grid_close_to_xij;
        double x0xij = base_x - xij;
        double _x0x0 = -aij * x0xij * x0xij;
        if (_x0x0 < EXPMIN) {
                // peak value already underflows exp(); nothing to tabulate
                return 0;
        }

        // Incremental evaluation of exp(-aij (x0 + k*dx)^2): each step
        // multiplies by exp of the linear term, which is itself updated by
        // the constant quadratic ratio — only three exp() calls in total.
        double _dxdx = -aij * dx * dx;
        double _x0dx = -2 * aij * x0xij * dx;
        double exp_dxdx = exp(_dxdx);
        double exp_2dxdx = exp_dxdx * exp_dxdx;
        double exp_x0dx = exp(_x0dx + _dxdx);
        double exp_x0x0 = exp(_x0x0);

        // sweep right from the grid point closest to the product center
        for (i = grid_close_to_xij; i < nx1_edge; i++) {
                xs_all[i] = exp_x0x0;
                exp_x0x0 *= exp_x0dx;
                exp_x0dx *= exp_2dxdx;
        }

        // sweep left from the center
        exp_x0dx = exp(_dxdx - _x0dx);
        exp_x0x0 = exp(_x0x0);
        for (i = grid_close_to_xij-1; i >= nx0_edge; i--) {
                exp_x0x0 *= exp_x0dx;
                exp_x0dx *= exp_2dxdx;
                xs_all[i] = exp_x0x0;
        }

        // multiply in powers (x - xi)^l row by row: row l = row l-1 * gridx
        if (topl > 0) {
                double x0xi = img0_x - xi;
                for (i = nx0_edge; i < nx1_edge; i++) {
                        gridx[i] = x0xi + i * dx;
                }
                for (l = 1; l <= topl; l++) {
                        px0 = xs_all + (l-1) * nmx;
                        for (i = nx0_edge; i < nx1_edge; i++) {
                                px0[nmx+i] = px0[i] * gridx[i];
                        }
                }
        }

        // fold periodic images back into the unit cell by summation
        if (nimg > 1) {
                for (l = 0; l <= topl; l++) {
                        px0 = xs_all + l * nmx;
                        for (i = 0; i < nx_per_cell; i++) {
                                xs_exp[l*nx_per_cell+i] = px0[i];
                        }
                        for (m = 1; m < nimg; m++) {
                                px0 = xs_all + l * nmx + m*nx_per_cell;
                                for (i = 0; i < nx_per_cell; i++) {
                                        xs_exp[l*nx_per_cell+i] += px0[i];
                                }
                        }
                }
        }
        return ngridx;
}

/* Build the per-axis exponential tables for all three lattice directions.
 * Returns the total table size consumed from cache, or 0 when any axis is
 * screened out entirely. */
static int _init_orth_data(double **xs_exp, double **ys_exp, double
                           **zs_exp, int *img_slice, int *grid_slice,
                           int *offset, int *submesh, int *mesh,
                           int topl, int dimension, double cutoff,
                           double ai, double aj, double *ri, double *rj,
                           double *a, double *b, double *cache)
{
        int l1 = topl + 1;
        // carve the three axis tables out of cache: x, then y, then z
        *xs_exp = cache;
        *ys_exp = *xs_exp + l1 * mesh[0];
        *zs_exp = *ys_exp + l1 * mesh[1];
        int data_size = l1 * (mesh[0] + mesh[1] + mesh[2]);
        cache += data_size;

        // a[0]/a[4]/a[8] and b[0]/b[4]/b[8] are the diagonal lattice and
        // reciprocal-lattice elements (orthorhombic cell assumed here).
        int ngridx = _orth_components(*xs_exp, img_slice, grid_slice,
                                      a[0], b[0], cutoff, ri[0], rj[0], ai, aj,
                                      (dimension>=1), mesh[0], topl,
                                      offset[0], submesh[0], cache);
        if (ngridx == 0) {
                return 0;
        }
        int ngridy = _orth_components(*ys_exp, img_slice+2, grid_slice+2,
                                      a[4], b[4], cutoff, ri[1], rj[1], ai, aj,
                                      (dimension>=2), mesh[1], topl,
                                      offset[1], submesh[1], cache);
        if (ngridy == 0) {
                return 0;
        }
        int ngridz = _orth_components(*zs_exp, img_slice+4, grid_slice+4,
                                      a[8], b[8], cutoff, ri[2], rj[2], ai, aj,
                                      (dimension>=3), mesh[2], topl,
                                      offset[2], submesh[2], cache);
        if (ngridz == 0) {
                return 0;
        }
        return data_size;
}

/* Contract the grid values `weights` with the separable x/y/z moment
 * tables: successive dgemm contractions over x then y, and an explicit
 * dot product over z, producing moments out[n] for all (lx,ly,lz) with
 * floorl <= lx+ly+lz <= topl.  The per-axis image count (nimg*) selects
 * between a contiguous slice, a wrapped two-piece slice, or the full
 * axis. */
static void _orth_ints(double *out, double *weights, int floorl, int topl,
                       double fac, double *xs_exp, double *ys_exp,
                       double *zs_exp, int *img_slice, int *grid_slice,
                       int *offset, int *submesh, int *mesh, double *cache)
{
        int l1 = topl + 1;
        int nimgx0 = img_slice[0];
        int nimgx1 = img_slice[1];
        int nimgy0 = img_slice[2];
        int nimgy1 = img_slice[3];
        int nimgz0 = img_slice[4];
        int nimgz1 = img_slice[5];
        int nimgx = nimgx1 - nimgx0;
        int nimgy = nimgy1 - nimgy0;
        int nimgz = nimgz1 - nimgz0;
        int nx0 = grid_slice[0];
        int nx1 = grid_slice[1];
        int ny0 = grid_slice[2];
        int ny1 = grid_slice[3];
        int nz0 = grid_slice[4];
        int nz1 = grid_slice[5];
        int ngridx = _num_grids_on_x(nimgx, nx0, nx1, mesh[0]);
        int ngridy = _num_grids_on_x(nimgy, ny0, ny1, mesh[1]);
        //int ngridz = _num_grids_on_x(nimgz, nz0, nz1, mesh[2]);

        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        int xcols = mesh[1] * mesh[2];
        int ycols = mesh[2];
        double *weightyz = cache;
        double *weightz = weightyz + l1*xcols;
        double *pz, *pweightz;
        double val;
        int lx, ly, lz;
        int l, i, n;

        //TODO: optimize the case in which nimgy << mesh[1] and nimgz << mesh[2]
        // contract x: weightyz[lx, y*z] = fac * sum_x xs_exp[lx,x] * weights[x,y,z]
        if (nimgx == 1) {
                dgemm_(&TRANS_N, &TRANS_N, &xcols, &l1, &ngridx,
                       &fac, weights+nx0*xcols, &xcols, xs_exp+nx0, mesh,
                       &D0, weightyz, &xcols);
        } else if (nimgx == 2 && !_has_overlap(nx0, nx1, mesh[0])) {
                // two disjoint wrapped pieces: [0,nx1) and [nx0,mesh[0])
                dgemm_(&TRANS_N, &TRANS_N, &xcols, &l1, &nx1,
                       &fac, weights, &xcols, xs_exp, mesh,
                       &D0, weightyz, &xcols);
                ngridx = mesh[0] - nx0;
                dgemm_(&TRANS_N, &TRANS_N, &xcols, &l1, &ngridx,
                       &fac, weights+nx0*xcols, &xcols, xs_exp+nx0, mesh,
                       &D1, weightyz, &xcols);
        } else {
                dgemm_(&TRANS_N, &TRANS_N, &xcols, &l1, mesh,
                       &fac, weights, &xcols, xs_exp, mesh,
                       &D0, weightyz, &xcols);
        }

        // contract y for each lx: weightz[lx, ly, z]
        if (nimgy == 1) {
                for (lx = 0; lx <= topl; lx++) {
                        dgemm_(&TRANS_N, &TRANS_N, &ycols, &l1, &ngridy,
                               &D1, weightyz+lx*xcols+ny0*ycols, &ycols,
                               ys_exp+ny0, mesh+1,
                               &D0, weightz+lx*l1*ycols, &ycols);
                        // call _orth_dot_z if ngridz << nimgz
                }
        } else if (nimgy == 2 && !_has_overlap(ny0, ny1, mesh[1])) {
                ngridy = mesh[1] - ny0;
                for (lx = 0; lx <= topl; lx++) {
                        dgemm_(&TRANS_N, &TRANS_N, &ycols, &l1, &ny1,
                               &D1, weightyz+lx*xcols, &ycols, ys_exp, mesh+1,
                               &D0, weightz+lx*l1*ycols, &ycols);
                        dgemm_(&TRANS_N, &TRANS_N, &ycols, &l1, &ngridy,
                               &D1, weightyz+lx*xcols+ny0*ycols, &ycols,
                               ys_exp+ny0, mesh+1,
                               &D1, weightz+lx*l1*ycols, &ycols);
                        // call _orth_dot_z if ngridz << nimgz
                }
        } else {
                for (lx = 0; lx <= topl; lx++) {
                        dgemm_(&TRANS_N, &TRANS_N, &ycols, &l1, mesh+1,
                               &D1, weightyz+lx*xcols, &ycols, ys_exp, mesh+1,
                               &D0, weightz+lx*l1*ycols, &ycols);
                }
        }

        // contract z explicitly; n enumerates Cartesian components of
        // total angular momentum floorl..topl in the canonical order
        if (nimgz == 1) {
                for (n = 0, l = floorl; l <= topl; l++) {
                for (lx = l; lx >= 0; lx--) {
                for (ly = l - lx; ly >= 0; ly--, n++) {
                        lz = l - lx - ly;
                        pz = zs_exp + lz * mesh[2];
                        pweightz = weightz + (lx * l1 + ly) * mesh[2];
                        val = 0;
                        for (i = nz0; i < nz1; i++) {
                                val += pweightz[i] * pz[i];
                        }
                        out[n] = val;
                } } }
        } else if (nimgz == 2 && !_has_overlap(nz0, nz1, mesh[2])) {
                for (n = 0, l = floorl; l <= topl; l++) {
                for (lx = l; lx >= 0; lx--) {
                for (ly = l - lx; ly >= 0; ly--, n++) {
                        lz = l - lx - ly;
                        pz = zs_exp + lz * mesh[2];
                        pweightz = weightz + (lx * l1 + ly) * mesh[2];
                        val = 0;
                        for (i = 0; i < nz1; i++) {
                                val += pweightz[i] * pz[i];
                        }
                        for (i = nz0; i < mesh[2]; i++) {
                                val += pweightz[i] * pz[i];
                        }
                        out[n] = val;
                } } }
        } else {
                for (n = 0, l = floorl; l <= topl; l++) {
                for (lx = l; lx >= 0; lx--) {
                for (ly = l - lx; ly >= 0; ly--, n++) {
                        lz = l - lx - ly;
                        pz = zs_exp + lz * mesh[2];
                        pweightz = weightz + (lx * l1 + ly) * mesh[2];
                        val = 0;
                        for (i = 0; i < mesh[2]; i++) {
                                val += pweightz[i] * pz[i];
                        }
                        out[n] = val;
                } } }
        }
}

/* LDA-type integral of grid `weights` against the shell pair (li,lj) on
 * an orthorhombic cell.  Returns 1 on success, 0 when screened out.
 * `comp` is unused in this routine. */
int NUMINTeval_lda_orth(double *weights, double *out, int comp,
                        int li, int lj, double ai, double aj,
                        double *ri, double *rj, double fac, double log_prec,
                        int dimension, double *a, double *b,
                        int *offset, int *submesh, int *mesh, double *cache)
{
        int floorl = li;
        int topl = li + lj;
        int offset_g1d = _CUM_LEN_CART[floorl] - _LEN_CART[floorl];
        int len_g3d = _CUM_LEN_CART[topl] - offset_g1d;
        double cutoff = gto_rcut(ai+aj, topl, fac, log_prec);
        double *g3d = cache;
        cache += len_g3d;
        int img_slice[6];
        int grid_slice[6];
        double *xs_exp, *ys_exp, *zs_exp;
        int data_size = _init_orth_data(&xs_exp, &ys_exp, &zs_exp,
                                        img_slice, grid_slice,
                                        offset, submesh, mesh, topl, dimension,
                                        cutoff, ai, aj, ri, rj, a, b, cache);
        if (data_size == 0) {
                return 0;
        }
        cache += data_size;

        // moments of the weights, then horizontal recursion to (li,lj)
        _orth_ints(g3d, weights, floorl, topl, fac, xs_exp, ys_exp, zs_exp,
                   img_slice, grid_slice, offset, submesh, mesh, cache);

        cache = g3d + _MAX_RR_SIZE[topl];
        _plain_vrr2d(out, g3d, cache, li, lj, ri, rj);
        return 1;
}

/* d/dx on the bra: out += -2*ai * <li+1| + lx * <li-1|, using the
 * precomputed li+1 (li_up) and li-1 (li_down) blocks. */
static void _rr_nablax_i(double *out, double *li_up, double *li_down,
                         int li, int lj, double ai)
{
        int di = _LEN_CART[li];
        int di1 = _LEN_CART[li+1];
        int dj = _LEN_CART[lj];
        int li_1 = li - 1;
        int i, j, lx, ly;
        double fac = -2 * ai;
        for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
                out[di*j+i] += li_up[di1*j+WHEREX_IF_L_INC1(i)] * fac;
        } }
        if (li_1 >= 0) {
                di1 = _LEN_CART[li_1];
                for (i = 0, lx = li_1; lx >= 0; lx--) {
                for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                        //lz = li_1 - lx - ly;
                        fac = lx + 1;
                        for (j = 0; j < dj; j++) {
                                out[di*j+WHEREX_IF_L_INC1(i)] +=
                                        li_down[di1*j+i] * fac;
                        }
                } }
        }
}

/* d/dy analogue of _rr_nablax_i. */
static void _rr_nablay_i(double *out, double *li_up, double *li_down,
                         int li, int lj, double ai)
{
        int di = _LEN_CART[li];
        int di1 = _LEN_CART[li+1];
        int dj = _LEN_CART[lj];
        int li_1 = li - 1;
        int i, j, lx, ly;
        double fac = -2 * ai;
        for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
                out[di*j+i] += li_up[di1*j+WHEREY_IF_L_INC1(i)] * fac;
        } }
        if (li_1 >= 0) {
                di1 = _LEN_CART[li_1];
                for (i = 0, lx = li_1; lx >= 0; lx--) {
                for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                        //lz = li_1 - lx - ly;
                        fac = ly + 1;
                        for (j = 0; j < dj; j++) {
                                out[di*j+WHEREY_IF_L_INC1(i)] +=
                                        li_down[di1*j+i] * fac;
                        }
                } }
        }
}

/* d/dz analogue of _rr_nablax_i. */
static void _rr_nablaz_i(double *out, double *li_up, double *li_down,
                         int li, int lj, double ai)
{
        int di = _LEN_CART[li];
        int di1 = _LEN_CART[li+1];
        int dj = _LEN_CART[lj];
        int li_1 = li - 1;
        int i, j, lx, ly, lz;
        double fac = -2 * ai;
        for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
                out[di*j+i] += li_up[di1*j+WHEREZ_IF_L_INC1(i)] * fac;
        } }
        if (li_1 >= 0) {
                di1 = _LEN_CART[li_1];
                for (i = 0, lx = li_1; lx >= 0; lx--) {
                for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                        lz = li_1 - lx - ly;
                        fac = lz + 1;
                        for (j = 0; j < dj; j++) {
                                out[di*j+WHEREZ_IF_L_INC1(i)] +=
                                        li_down[di1*j+i] * fac;
                        }
                } }
        }
}

/* Horizontal recursion that produces both the (li+1,lj) block (out_up)
 * and the (li-1,lj) block (out_down) needed by the nabla recursions. */
static void _plain_vrr2d_updown(double *out_up, double *out_down,
                                double *g, double *gbuf2, int li, int lj,
                                double *ri, double *rj)
{
        int nmax = li + 1 + lj;
        int li_1 = MAX(li - 1, 0);
        double *g00, *g01, *gswap, *pg00, *pg01;
        int row_01, col_01, row_00, col_00;
        int i, j;
        double rirj[3];
        rirj[0] = ri[0] - rj[0];
        rirj[1] = ri[1] - rj[1];
        rirj[2] = ri[2] - rj[2];

        g00 = gbuf2;
        g01 = g;
        for (j = 1; j < lj; j++) {
                gswap = g00;
                g00 = g01;
                g01 = gswap;
                pg00 = g00;
                pg01 = g01;
                for (i = li_1; i <= nmax-j; i++) {
                        GTOplain_vrr2d_ket_inc1(pg01, pg00, rirj, i, j);
                        row_01 = _LEN_CART[i];
                        col_01 = _LEN_CART[j];
                        row_00
= _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00; pg01 += row_01*col_01; } } if (li == 0) { g01 += _LEN_CART[MAX(lj-1, 0)]; } else { GTOplain_vrr2d_ket_inc1(out_down, g01, rirj, li_1, lj); g01 += (_LEN_CART[li_1] + _LEN_CART[li]) * _LEN_CART[MAX(lj-1, 0)]; } GTOplain_vrr2d_ket_inc1(out_up, g01, rirj, li+1, lj); } int NUMINTeval_gga_orth(double *weights, double *out, int comp, int li, int lj, double ai, double aj, double *ri, double *rj, double fac, double log_prec, int dimension, double *a, double *b, int *offset, int *submesh, int *mesh, double *cache) { int floorl = MAX(li - 1, 0); int topl = li + 1 + lj; int di = _LEN_CART[li]; int dj = _LEN_CART[lj]; double cutoff = gto_rcut(ai+aj, topl, fac, log_prec); double *out_up = cache; double *out_down = out_up + _LEN_CART[li+1] * dj; double *g3d = out_down + di * dj; cache = g3d + _MAX_RR_SIZE[topl]; int img_slice[6]; int grid_slice[6]; double *xs_exp, *ys_exp, *zs_exp; int data_size = _init_orth_data(&xs_exp, &ys_exp, &zs_exp, img_slice, grid_slice, offset, submesh, mesh, topl, dimension, cutoff, ai, aj, ri, rj, a, b, cache); if (data_size == 0) { return 0; } cache += data_size; size_t ngrids = ((size_t)mesh[0]) * mesh[1] * mesh[2]; double *vx = weights + ngrids; double *vy = vx + ngrids; double *vz = vy + ngrids; _orth_ints(g3d, weights, li, li+lj, fac, xs_exp, ys_exp, zs_exp, img_slice, grid_slice, offset, submesh, mesh, cache); _plain_vrr2d(out, g3d, cache, li, lj, ri, rj); _orth_ints(g3d, vx, floorl, topl, fac, xs_exp, ys_exp, zs_exp, img_slice, grid_slice, offset, submesh, mesh, cache); _plain_vrr2d_updown(out_up, out_down, g3d, cache, li, lj, ri, rj); _rr_nablax_i(out, out_up, out_down, li, lj, ai); _orth_ints(g3d, vy, floorl, topl, fac, xs_exp, ys_exp, zs_exp, img_slice, grid_slice, offset, submesh, mesh, cache); _plain_vrr2d_updown(out_up, out_down, g3d, cache, li, lj, ri, rj); _rr_nablay_i(out, out_up, out_down, li, lj, ai); _orth_ints(g3d, vz, floorl, topl, fac, xs_exp, ys_exp, zs_exp, 
img_slice, grid_slice, offset, submesh, mesh, cache); _plain_vrr2d_updown(out_up, out_down, g3d, cache, li, lj, ri, rj); _rr_nablaz_i(out, out_up, out_down, li, lj, ai); return 1; } static int _MAX_AFFINE_SIZE[] = { 1, 8, 32, 108, 270, 640, 1280, 2500, 4375, 7560, 12096, 19208, 28812, 43008, 61440, 87480, }; /* * x = a00 x' + a10 y' + a20 z' * y = a01 x' + a11 y' + a21 z' * z = a02 x' + a12 y' + a22 z' * Given f(x',y',z') use the above equations to evaluate f(x,y,z) */ static void _affine_trans(double *out, double *int3d, double *a, int floorl, int topl, double *cache) { if (topl == 0) { out[0] = int3d[0]; return; } int lx, ly, lz, l, m, n, i; int l1, l1l1, l1l1l1, lll; double *old = int3d; double *new = cache + _MAX_AFFINE_SIZE[topl]; double *oldx, *oldy, *oldz, *newx, *tmp; double vx, vy, vz; if (floorl == 0) { out[0] = int3d[0]; out += 1; } for (m = 1, l = topl; m <= topl; m++, l--) { l1 = l + 1; l1l1 = l1 * l1; lll = l * l * l; l1l1l1 = l1l1 * l1; newx = new; // attach x for (i = STARTX_IF_L_DEC1(m); i < _LEN_CART[m-1]; i++) { oldx = old + i * l1l1l1 + l1l1; oldy = old + i * l1l1l1 + l1; oldz = old + i * l1l1l1 + 1; for (n = 0, lx = 0; lx < l; lx++) { for (ly = 0; ly < l; ly++) { for (lz = 0; lz < l; lz++, n++) { vx = oldx[lx*l1l1+ly*l1+lz]; vy = oldy[lx*l1l1+ly*l1+lz]; vz = oldz[lx*l1l1+ly*l1+lz]; newx[n] = vx * a[0] + vy * a[3] + vz * a[6]; } } } newx += lll; } // attach y for (i = STARTY_IF_L_DEC1(m); i < _LEN_CART[m-1]; i++) { oldx = old + i * l1l1l1 + l1l1; oldy = old + i * l1l1l1 + l1; oldz = old + i * l1l1l1 + 1; for (n = 0, lx = 0; lx < l; lx++) { for (ly = 0; ly < l; ly++) { for (lz = 0; lz < l; lz++, n++) { vx = oldx[lx*l1l1+ly*l1+lz]; vy = oldy[lx*l1l1+ly*l1+lz]; vz = oldz[lx*l1l1+ly*l1+lz]; newx[n] = vx * a[1] + vy * a[4] + vz * a[7]; } } } newx += lll; } // attach z i = STARTZ_IF_L_DEC1(m); oldx = old + i * l1l1l1 + l1l1; oldy = old + i * l1l1l1 + l1; oldz = old + i * l1l1l1 + 1; for (n = 0, lx = 0; lx < l; lx++) { for (ly = 0; ly < l; ly++) { for 
(lz = 0; lz < l; lz++, n++) {
            vx = oldx[lx*l1l1+ly*l1+lz];
            vy = oldy[lx*l1l1+ly*l1+lz];
            vz = oldz[lx*l1l1+ly*l1+lz];
            newx[n] = vx * a[2] + vy * a[5] + vz * a[8];
        } } }

        /* Cartesian components of shell m are the grid origins (stride lll)
         * of the freshly built l^3 cubes; emit them once m >= floorl. */
        if (floorl <= m) {
            for (i = 0; i < _LEN_CART[m]; i++) {
                out[i] = new[i * lll];
            }
            out += _LEN_CART[m];
        }

        /* Ping-pong: after the first step the input buffer int3d must not be
         * reused as scratch, so swap `new` against cache instead of `old`. */
        if (m == 1) {
            old = new;
            new = cache;
        } else {
            tmp = old;
            old = new;
            new = tmp;
        }
    }
}

/*
 * Adjoint of _affine_trans: scatter Cartesian components `in`
 * (angular momenta floorl..topl) back onto the (topl+1)^3 x/y/z moment grid
 * out3d, accumulating through the transposed affine recurrence.
 * cache must hold 2 * _MAX_AFFINE_SIZE[topl] doubles.
 */
static void _reverse_affine_trans(double *out3d, double *in, double *a,
                                  int floorl, int topl, double *cache)
{
    if (topl == 0) {
        out3d[0] = in[0];
        return;
    }

    int lx, ly, lz, l, m, n, i;
    int l1, l1l1, l1l1l1, lll;
    double *cart = in;
    double *old = cache;
    double *new = cache + _MAX_AFFINE_SIZE[topl];
    double *oldx, *newx, *newy, *newz, *tmp;

    /* Walk cart to one past the last shell; shells are consumed backwards. */
    for (l = floorl; l <= topl; l++) {
        cart += _LEN_CART[l];
    }

    for (l = 1, m = topl; l <= topl; l++, m--) {
        l1 = l + 1;
        l1l1 = l1 * l1;
        lll = l * l * l;
        l1l1l1 = l1l1 * l1;
        if (l == topl) {
            new = out3d;        // final iteration writes directly to output
        }
        for (n = 0; n < l1l1l1*_LEN_CART[m-1]; n++) {
            new[n] = 0;
        }
        /* Seed cube origins with the Cartesian data for shell m. */
        if (floorl <= m) {
            cart -= _LEN_CART[m];
            for (i = 0; i < _LEN_CART[m]; i++) {
                old[i * lll] = cart[i];
            }
        }
        oldx = old;
        // attach x (transpose of the forward "attach x" contraction)
        for (i = STARTX_IF_L_DEC1(m); i < _LEN_CART[m-1]; i++) {
            newx = new + i * l1l1l1 + l1l1;
            newy = new + i * l1l1l1 + l1;
            newz = new + i * l1l1l1 + 1;
            for (n = 0, lx = 0; lx < l; lx++) {
            for (ly = 0; ly < l; ly++) {
            for (lz = 0; lz < l; lz++, n++) {
                newx[lx*l1l1+ly*l1+lz] += a[0] * oldx[n];
                newy[lx*l1l1+ly*l1+lz] += a[3] * oldx[n];
                newz[lx*l1l1+ly*l1+lz] += a[6] * oldx[n];
            } } }
            oldx += lll;
        }
        // attach y
        for (i = STARTY_IF_L_DEC1(m); i < _LEN_CART[m-1]; i++) {
            newx = new + i * l1l1l1 + l1l1;
            newy = new + i * l1l1l1 + l1;
            newz = new + i * l1l1l1 + 1;
            for (n = 0, lx = 0; lx < l; lx++) {
            for (ly = 0; ly < l; ly++) {
            for (lz = 0; lz < l; lz++, n++) {
                newx[lx*l1l1+ly*l1+lz] += a[1] * oldx[n];
                newy[lx*l1l1+ly*l1+lz] += a[4] * oldx[n];
                newz[lx*l1l1+ly*l1+lz] += a[7] * oldx[n];
            } } }
            oldx += lll;
        }
        // attach z
        i = STARTZ_IF_L_DEC1(m);
        newx = new + i * l1l1l1 + l1l1;
        newy = new + i * l1l1l1 + l1;
        newz = new +
i * l1l1l1 + 1;
        for (n = 0, lx = 0; lx < l; lx++) {
        for (ly = 0; ly < l; ly++) {
        for (lz = 0; lz < l; lz++, n++) {
            newx[lx*l1l1+ly*l1+lz] += a[2] * oldx[n];
            newy[lx*l1l1+ly*l1+lz] += a[5] * oldx[n];
            newz[lx*l1l1+ly*l1+lz] += a[8] * oldx[n];
        } } }
        tmp = new;
        new = old;
        old = tmp;
    }
    if (floorl == 0) {
        out3d[0] = in[0];
    }
}

/*
 * Build the 1D fractional-coordinate tables xs_exp[l*nx+i] = (x_i - xi_frac)^l
 * for one lattice direction of a non-orthogonal cell, and record the image
 * range (img_slice) and grid range (grid_slice) that the Gaussian pair
 * reaches within `cutoff`.  Returns the number of grid points nx, or 0 when
 * the pair does not touch this direction's (sub)mesh.
 * b is one row of the reciprocal-lattice matrix; SQUARE(b) gives the squared
 * height of the cell in this direction (assumption based on usage — verify).
 */
static int _nonorth_components(double *xs_exp, int *img_slice, int *grid_slice,
                               double *b, int periodic, int nx_per_cell,
                               int topl, int offset, int submesh,
                               double xi_frac, double xij_frac, double cutoff)
{
    double heights_inv = sqrt(SQUARE(b));
    double edge0 = xij_frac - cutoff * heights_inv;
    double edge1 = xij_frac + cutoff * heights_inv;

    if (edge0 == edge1) {
        // cutoff may be so small that it does not provide difference to edge0 and
        // edge1. When edge0 and edge1 are right on the edge of the box (== integer),
        // nimg0 may be equal to nimg1 and nimg can be 0. Skip this extreme condition.
        return 0;
    }

    int nimg0 = 0;
    int nimg1 = 1;
    // If submesh is not identical to mesh, it means the product of the basis
    // functions should be completely inside the unit cell. Only one image needs to
    // be considered.
    if (offset != 0 || submesh != nx_per_cell) {
        // |i> is the steep function and centered inside image 0. Moving |j> all around
        // will not change the center of |ij>. The periodic system can be treated as
        // non-periodic system so that only one image needs to be considered.
nimg0 = (int)floor(xij_frac);
        nimg1 = nimg0 + 1;
        edge0 = MAX(edge0, nimg0);
        edge1 = MIN(edge1, nimg1);
    } else if (periodic) {
        nimg0 = (int)floor(edge0);
        nimg1 = (int)ceil (edge1);
    }
    int nimg = nimg1 - nimg0;
    int nmx0 = nimg0 * nx_per_cell;

    int nx0 = (int)floor(edge0 * nx_per_cell);
    int nx1 = (int)ceil (edge1 * nx_per_cell);
    if (nimg == 1) {
        /* Clamp the grid window into [offset, offset+submesh) of image nimg0. */
        nx0 = MIN(nx0, nmx0 + offset + submesh);
        nx0 = MAX(nx0, nmx0 + offset);
        nx1 = MIN(nx1, nmx0 + offset + submesh);
        nx1 = MAX(nx1, nmx0 + offset);
    }
    img_slice[0] = nimg0;
    img_slice[1] = nimg1;
    grid_slice[0] = nx0;
    grid_slice[1] = nx1;

    int nx = nx1 - nx0;
    if (nx <= 0) {
        return 0;
    }

    int i, l;
    double x0;
    double dx = 1. / nx_per_cell;
    double *pxs_exp;
    /* xs_exp[l*nx+i] = (x_i - xi_frac)^l, built by repeated multiplication. */
    for (i = 0; i < nx; i++) {
        xs_exp[i] = 1;
    }
    for (l = 1; l <= topl; l++) {
        pxs_exp = xs_exp + (l-1) * nx;
        x0 = nx0 * dx - xi_frac;
        for (i = 0; i < nx; i++, x0+=dx) {
            xs_exp[l*nx+i] = x0 * pxs_exp[i];
        }
    }
    return nx;
}

/*
 * Contract one z-column of grid weights with the Gaussian along z, walking
 * outward from grid_close_to_zij in both directions so the running products
 * exp_z0z0 (the Gaussian value) and exp_z0dz (its per-step update) stay in a
 * numerically benign range.  Results go to val[0..nz1-nz0); weights is a
 * periodic array of length meshz and iz1 is kept wrapped into [0, meshz).
 * The e_* arguments are precomputed exponentials; _z0dz/_dzdz are the raw
 * exponents, used only to regenerate ratios when an exponential underflowed
 * to 0.
 */
static void _nonorth_dot_z(double *val, double *weights,
                           int meshz, int nz0, int nz1, int grid_close_to_zij,
                           double e_z0z0, double e_z0dz, double e_dzdz,
                           double _z0dz, double _dzdz)
{
    int iz, iz1;
    if (e_z0z0 == 0) {
        for (iz = 0; iz < nz1-nz0; iz++) {
            val[iz] = 0;
        }
        return;
    }

    double exp_2dzdz = e_dzdz * e_dzdz;
    double exp_z0z0, exp_z0dz;

    /* Forward sweep: iz increasing from the closest grid point. */
    exp_z0z0 = e_z0z0;
    exp_z0dz = e_z0dz * e_dzdz;
    //:iz1 = grid_close_to_zij % meshz + meshz;
    //:for (iz = grid_close_to_zij-nz0; iz < nz1-nz0; iz++, iz1++) {
    //:    if (iz1 >= meshz) {
    //:        iz1 -= meshz;
    //:    }
    //:    val[iz] = weights[iz1] * exp_z0z0;
    //:    exp_z0z0 *= exp_z0dz;
    //:    exp_z0dz *= exp_2dzdz;
    //:}
    iz1 = grid_close_to_zij % meshz;
    if (iz1 < 0) {
        /* C's % can be negative; shift into [0, meshz). */
        iz1 += meshz;
    }
    iz = grid_close_to_zij-nz0;
    while (iz+meshz-iz1 < nz1-nz0) {
        for (; iz1 < meshz; iz1++, iz++) {
            val[iz] = weights[iz1] * exp_z0z0;
            exp_z0z0 *= exp_z0dz;
            exp_z0dz *= exp_2dzdz;
        }
        iz1 = 0;
    }
    for (; iz < nz1-nz0; iz++, iz1++) {
        val[iz] = weights[iz1] * exp_z0z0;
        exp_z0z0 *= exp_z0dz;
        exp_z0dz *= exp_2dzdz;
    }

    /* Backward sweep: restart at the center and step the ratio downward. */
    exp_z0z0 = e_z0z0;
    if (e_z0dz != 0) {
        exp_z0dz = e_dzdz / e_z0dz;
    } else {
exp_z0dz = exp(_dzdz - _z0dz);   // regenerate the ratio when e_z0dz underflowed
    }
    //:iz1 = (grid_close_to_zij-1) % meshz;
    //:for (iz = grid_close_to_zij-nz0-1; iz >= 0; iz--, iz1--) {
    //:    if (iz1 < 0) {
    //:        iz1 += meshz;
    //:    }
    //:    exp_z0z0 *= exp_z0dz;
    //:    exp_z0dz *= exp_2dzdz;
    //:    val[iz] = weights[iz1] * exp_z0z0;
    //:}
    iz1 = (grid_close_to_zij-1) % meshz;
    if (iz1 < 0) {
        iz1 += meshz;
    }
    iz = grid_close_to_zij-nz0 - 1;
    while (iz-iz1 >= 0) {
        for (; iz1 >= 0; iz1--, iz--) {
            exp_z0z0 *= exp_z0dz;
            exp_z0dz *= exp_2dzdz;
            val[iz] = weights[iz1] * exp_z0z0;
        }
        iz1 = meshz - 1;
    }
    for (; iz >= 0; iz--, iz1--) {
        exp_z0z0 *= exp_z0dz;
        exp_z0dz *= exp_2dzdz;
        val[iz] = weights[iz1] * exp_z0z0;
    }
}

/*
 * Same contraction as _nonorth_dot_z, specialized for the single-image case:
 * iz1 never wraps inside the sweeps, so the wrap checks are dropped.
 */
static void _nonorth_dot_z_1img(double *val, double *weights,
                                int meshz, int nz0, int nz1, int grid_close_to_zij,
                                double e_z0z0, double e_z0dz, double e_dzdz,
                                double _z0dz, double _dzdz)
{
    int iz, iz1;
    if (e_z0z0 == 0) {
        for (iz = 0; iz < nz1-nz0; iz++) {
            val[iz] = 0;
        }
        return;
    }

    double exp_2dzdz = e_dzdz * e_dzdz;
    double exp_z0z0, exp_z0dz;

    exp_z0z0 = e_z0z0;
    exp_z0dz = e_z0dz * e_dzdz;
    iz1 = grid_close_to_zij % meshz;
    if (iz1 < 0) {
        iz1 += meshz;
    }
    for (iz = grid_close_to_zij-nz0; iz < nz1-nz0; iz++, iz1++) {
        val[iz] = weights[iz1] * exp_z0z0;
        exp_z0z0 *= exp_z0dz;
        exp_z0dz *= exp_2dzdz;
    }

    exp_z0z0 = e_z0z0;
    if (e_z0dz != 0) {
        exp_z0dz = e_dzdz / e_z0dz;
    } else {
        exp_z0dz = exp(_dzdz - _z0dz);
    }
    iz1 = (grid_close_to_zij-1) % meshz;
    if (iz1 < 0) {
        iz1 += meshz;
    }
    for (iz = grid_close_to_zij-nz0-1; iz >= 0; iz--, iz1--) {
        exp_z0z0 *= exp_z0dz;
        exp_z0dz *= exp_2dzdz;
        val[iz] = weights[iz1] * exp_z0z0;
    }
}

/*
 * Core non-orthogonal integral kernel: contract the grid `weights` with the
 * pair Gaussian exp(-aij |r - rij|^2) (rij in fractional coordinates
 * rij_frac) and the 1D polynomial tables xs_exp/ys_exp/zs_exp, producing the
 * (topl+1)^3 moment cube `out`.  The Gaussian is evaluated by running
 * multiplicative exponential recurrences along y and z (see _nonorth_dot_z);
 * contraction over z, y, then x is done with dgemm.  `a` is the 3x3 lattice
 * matrix used to build the quadratic-form coefficients aa_* = aij * (a a^T).
 */
static void _nonorth_ints(double *out, double *weights, double fac, double aij,
                          int topl, int dimension, double *a, double *rij_frac,
                          int *mesh, int *img_slice, int *grid_slice,
                          double *xs_exp, double *ys_exp, double *zs_exp,
                          double *cache)
{
    int l1 = topl + 1;
    int l1l1 = l1 * l1;
    int l1l1l1 = l1l1 * l1;
    int nx0 = grid_slice[0];
    int nx1 = grid_slice[1];
    int ny0 = grid_slice[2];
    int ny1 = grid_slice[3];
    int nz0 = grid_slice[4];
    int
nz1 = grid_slice[5];
    int ngridx = nx1 - nx0;
    int ngridy = ny1 - ny0;
    int ngridz = nz1 - nz0;
    //int nimgx0 = img_slice[0];
    //int nimgx1 = img_slice[1];
    //int nimgy0 = img_slice[2];
    //int nimgy1 = img_slice[3];
    int nimgz0 = img_slice[4];
    int nimgz1 = img_slice[5];
    //int nimgx = nimgx1 - nimgx0;
    //int nimgy = nimgy1 - nimgy0;
    int nimgz = nimgz1 - nimgz0;
    const char TRANS_T = 'T';
    const char TRANS_N = 'N';
    const double D0 = 0;
    const double D1 = 1;
    // aa = einsum('ij,kj->ik', a, a)
    //double aa[9];
    //int n3 = 3;
    //dgemm_(&TRANS_T, &TRANS_N, &n3, &n3, &n3,
    //       &aij, a, &n3, a, &n3, &D0, aa, &n3);
    double aa_xx = aij * (a[0] * a[0] + a[1] * a[1] + a[2] * a[2]);
    double aa_xy = aij * (a[0] * a[3] + a[1] * a[4] + a[2] * a[5]);
    double aa_xz = aij * (a[0] * a[6] + a[1] * a[7] + a[2] * a[8]);
    double aa_yy = aij * (a[3] * a[3] + a[4] * a[4] + a[5] * a[5]);
    double aa_yz = aij * (a[3] * a[6] + a[4] * a[7] + a[5] * a[8]);
    double aa_zz = aij * (a[6] * a[6] + a[7] * a[7] + a[8] * a[8]);

    int ix, iy, ix1, iy1, n;
    double dx = 1. / mesh[0];
    double dy = 1. / mesh[1];
    double dz = 1. / mesh[2];

    /* Scratch layout (all in `cache`):
     *   cache_xyz : l1^3 (unused head, kept for the callers' layout)
     *   weight_x  : l1^2 * ngridx   z,y-contracted weights per x point
     *   weight_z  : l1 * ngridz     y-contracted weights
     *   weight_yz : ngridy * ngridz Gaussian-weighted grid columns */
    double *cache_xyz = cache;
    double *weight_x = cache_xyz + l1l1l1;
    double *weight_z = weight_x + l1l1 * ngridx;
    double *weight_yz = weight_z + l1 * ngridz;
    double *pweights;

    //int grid_close_to_xij = rint(rij_frac[0] * mesh[0]);
    int grid_close_to_yij = rint(rij_frac[1] * mesh[1]);
    int grid_close_to_zij = rint(rij_frac[2] * mesh[2]);
    //grid_close_to_xij = MIN(grid_close_to_xij, nx1);
    //grid_close_to_xij = MAX(grid_close_to_xij, nx0);
    grid_close_to_yij = MIN(grid_close_to_yij, ny1);
    grid_close_to_yij = MAX(grid_close_to_yij, ny0);
    grid_close_to_zij = MIN(grid_close_to_zij, nz1);
    grid_close_to_zij = MAX(grid_close_to_zij, nz0);

    double img0_x = 0;
    double img0_y = 0;
    double img0_z = 0;
    double base_x = img0_x;// + dx * grid_close_to_xij;
    double base_y = img0_y + dy * grid_close_to_yij;
    double base_z = img0_z + dz * grid_close_to_zij;
    double x0xij = base_x - rij_frac[0];
    double y0yij = base_y - rij_frac[1];
    double z0zij = base_z - rij_frac[2];

    /* Per-step exponent increments for the y/z recurrences. */
    double _dydy = -dy * dy * aa_yy;
    double _dzdz = -dz * dz * aa_zz;
    double _dydz = -dy * dz * aa_yz * 2;
    double exp_dydy = exp(_dydy);
    double exp_2dydy = exp_dydy * exp_dydy;
    double exp_dzdz = exp(_dzdz);
    double exp_dydz = exp(_dydz);
    double exp_dydz_i = (exp_dydz == 0) ? 0 : 1./exp_dydz;
    double x1xij, tmpx, tmpy, tmpz;
    double _xyz0xyz0, _xyz0dy, _xyz0dz, _z0dz;
    double exp_xyz0xyz0, exp_xyz0dz;
    double exp_y0dy, exp_z0z0, exp_z0dz;

    ix1 = nx0 % mesh[0] + mesh[0];
    for (ix = nx0; ix < nx1; ix++, ix1++) {
        if (ix1 >= mesh[0]) {
            ix1 -= mesh[0];
        }
        x1xij = x0xij + ix*dx;
        /* Gradient of the quadratic form at (x1xij, y0yij, z0zij). */
        tmpx = x1xij * aa_xx + y0yij * aa_xy + z0zij * aa_xz;
        tmpy = x1xij * aa_xy + y0yij * aa_yy + z0zij * aa_yz;
        tmpz = x1xij * aa_xz + y0yij * aa_yz + z0zij * aa_zz;
        _xyz0xyz0 = -x1xij * tmpx - y0yij * tmpy - z0zij * tmpz;
        if (_xyz0xyz0 < EXPMIN) {
            // _xyz0dy (and _xyz0dz) can be very big, even greater than the effective range
            // of exp function (and produce inf). When exp_xyz0xyz0 is 0 and exp_xyz0dy is
            // inf, the product will be ill-defined. |_xyz0dy| should be smaller than
            // |_xyz0xyz0| in any situations. exp_xyz0xyz0 should dominate the product
            // exp_xyz0xyz0 * exp_xyz0dy. When exp_xyz0xyz0 is 0, the product should be 0.
            // All the rest exp products should be smaller than exp_xyz0xyz0 and can be
            // neglected.
            pweights = weight_x + (ix-nx0)*l1l1;
            for (n = 0; n < l1l1; n++) {
                pweights[n] = 0;
            }
            continue;
        }
        _xyz0dy = -2 * dy * tmpy;
        _xyz0dz = -2 * dz * tmpz;
        exp_xyz0xyz0 = fac * exp(_xyz0xyz0);
        exp_xyz0dz = exp(_xyz0dz);
        //exp_xyz0dy = exp(_xyz0dy);
        //exp_y0dy = exp_xyz0dy * exp_dydy;
        exp_y0dy = exp(_xyz0dy + _dydy);

        /* Forward sweep over y starting at the closest grid point. */
        exp_z0z0 = exp_xyz0xyz0;
        exp_z0dz = exp_xyz0dz;
        _z0dz = _xyz0dz;
        iy1 = grid_close_to_yij % mesh[1] + mesh[1];
        for (iy = grid_close_to_yij; iy < ny1; iy++, iy1++) {
            if (iy1 >= mesh[1]) {
                iy1 -= mesh[1];
            }
            pweights = weights + (ix1 * mesh[1] + iy1) * mesh[2];
            if (nimgz == 1) {
                _nonorth_dot_z_1img(weight_yz+(iy-ny0)*ngridz, pweights,
                                    mesh[2], nz0, nz1, grid_close_to_zij,
                                    exp_z0z0, exp_z0dz, exp_dzdz, _z0dz, _dzdz);
            } else {
                _nonorth_dot_z(weight_yz+(iy-ny0)*ngridz, pweights,
                               mesh[2], nz0, nz1, grid_close_to_zij,
                               exp_z0z0, exp_z0dz, exp_dzdz, _z0dz, _dzdz);
            }
            _z0dz += _dydz;
            exp_z0z0 *= exp_y0dy;
            exp_z0dz *= exp_dydz;
            exp_y0dy *= exp_2dydy;
        }

        /* Backward sweep over y. */
        exp_y0dy = exp(_dydy - _xyz0dy);
        exp_z0z0 = exp_xyz0xyz0;
        exp_z0dz = exp_xyz0dz;
        _z0dz = _xyz0dz;
        iy1 = (grid_close_to_yij-1) % mesh[1];
        for (iy = grid_close_to_yij-1; iy >= ny0; iy--, iy1--) {
            if (iy1 < 0) {
                iy1 += mesh[1];
            }
            exp_z0z0 *= exp_y0dy;
            exp_y0dy *= exp_2dydy;
            _z0dz -= _dydz;
            if (exp_dydz != 0) {
                exp_z0dz *= exp_dydz_i;
            } else {
                exp_z0dz = exp(_z0dz);  // rebuild after underflow
            }
            pweights = weights + (ix1 * mesh[1] + iy1) * mesh[2];
            if (nimgz == 1) {
                _nonorth_dot_z_1img(weight_yz+(iy-ny0)*ngridz, pweights,
                                    mesh[2], nz0, nz1, grid_close_to_zij,
                                    exp_z0z0, exp_z0dz, exp_dzdz, _z0dz, _dzdz);
            } else {
                _nonorth_dot_z(weight_yz+(iy-ny0)*ngridz, pweights,
                               mesh[2], nz0, nz1, grid_close_to_zij,
                               exp_z0z0, exp_z0dz, exp_dzdz, _z0dz, _dzdz);
            }
        }

        /* Contract over y then z for this x point. */
        dgemm_(&TRANS_N, &TRANS_N, &ngridz,
&l1, &ngridy, &D1,
               weight_yz, &ngridz, ys_exp, &ngridy,
               &D0, weight_z, &ngridz);
        dgemm_(&TRANS_T, &TRANS_N, &l1, &l1, &ngridz, &D1,
               zs_exp, &ngridz, weight_z, &ngridz,
               &D0, weight_x+(ix-nx0)*l1l1, &l1);
    }
    /* Final contraction over x produces the (topl+1)^3 moment cube. */
    dgemm_(&TRANS_N, &TRANS_N, &l1l1, &l1, &ngridx, &D1,
           weight_x, &l1l1, xs_exp, &ngridx,
           &D0, out, &l1l1);
}

/*
 * Compute the Gaussian-product center rij = (ai*ri + aj*rj)/(ai+aj) and
 * convert both rij and ri to fractional coordinates with the
 * reciprocal-lattice matrix b (row-major 3x3).
 */
static void _make_rij_frac(double *ri_frac, double *rij_frac,
                           double *ri, double *rj, double ai, double aj,
                           double *a, double *b)
{
    double aij = ai + aj;
    double rij[3];
    rij[0] = (ai * ri[0] + aj * rj[0]) / aij;
    rij[1] = (ai * ri[1] + aj * rj[1]) / aij;
    rij[2] = (ai * ri[2] + aj * rj[2]) / aij;
    // rij_frac = einsum('ij,j->ik', b, rij)
    rij_frac[0] = rij[0] * b[0] + rij[1] * b[1] + rij[2] * b[2];
    rij_frac[1] = rij[0] * b[3] + rij[1] * b[4] + rij[2] * b[5];
    rij_frac[2] = rij[0] * b[6] + rij[1] * b[7] + rij[2] * b[8];
    ri_frac[0] = ri[0] * b[0] + ri[1] * b[1] + ri[2] * b[2];
    ri_frac[1] = ri[0] * b[3] + ri[1] * b[4] + ri[2] * b[5];
    ri_frac[2] = ri[0] * b[6] + ri[1] * b[7] + ri[2] * b[8];
}

/*
 * Build the x/y/z polynomial tables (in `cache`) and the image/grid windows
 * for a non-orthogonal cell.  Returns the number of doubles consumed from
 * `cache`, or 0 when the pair has no grid overlap in any direction.
 */
static int _init_nonorth_data(double **xs_exp, double **ys_exp, double **zs_exp,
                              int *img_slice, int *grid_slice,
                              int *offset, int *submesh, int *mesh,
                              int topl, int dimension, double cutoff,
                              double *a, double *b,
                              double *ri_frac, double *rij_frac, double *cache)
{
    int l1 = topl + 1;
    *xs_exp = cache;
    int ngridx = _nonorth_components(*xs_exp, img_slice, grid_slice,
                                     b, (dimension>=1), mesh[0], topl,
                                     offset[0], submesh[0],
                                     ri_frac[0], rij_frac[0], cutoff);
    if (ngridx == 0) {
        return 0;
    }
    *ys_exp = *xs_exp + l1 * ngridx;
    int ngridy = _nonorth_components(*ys_exp, img_slice+2, grid_slice+2,
                                     b+3, (dimension>=2), mesh[1], topl,
                                     offset[1], submesh[1],
                                     ri_frac[1], rij_frac[1], cutoff);
    if (ngridy == 0) {
        return 0;
    }
    *zs_exp = *ys_exp + l1 * ngridy;
    int ngridz = _nonorth_components(*zs_exp, img_slice+4, grid_slice+4,
                                     b+6, (dimension>=3), mesh[2], topl,
                                     offset[2], submesh[2],
                                     ri_frac[2], rij_frac[2], cutoff);
    if (ngridz == 0) {
        return 0;
    }
    int data_size = l1 * (ngridx + ngridy + ngridz);
return data_size;
}

/*
 * LDA integrals <i| weights |j> for one uncontracted Cartesian shell pair on
 * a non-orthogonal grid.  Writes _LEN_CART[li]*_LEN_CART[lj] values to `out`;
 * returns 1 on success, 0 when the pair does not touch the grid.
 */
int NUMINTeval_lda_nonorth(double *weights, double *out, int comp,
                           int li, int lj, double ai, double aj,
                           double *ri, double *rj, double fac, double log_prec,
                           int dimension, double *a, double *b,
                           int *offset, int *submesh, int *mesh, double *cache)
{
    int floorl = li;
    int topl = li + lj;
    int l1 = topl + 1;
    double aij = ai + aj;
    double cutoff = gto_rcut(aij, topl, fac, log_prec);
    int img_slice[6];
    int grid_slice[6];
    double ri_frac[3];
    double rij_frac[3];
    double *xs_exp, *ys_exp, *zs_exp;
    _make_rij_frac(ri_frac, rij_frac, ri, rj, ai, aj, a, b);

    /* NOTE(review): `mesh` is passed for BOTH the submesh and mesh arguments
     * of _init_nonorth_data, while this function receives a separate
     * `submesh` parameter that is left unused.  Presumably the non-orth path
     * only supports submesh == mesh — confirm against the Python caller. */
    int data_size = _init_nonorth_data(&xs_exp, &ys_exp, &zs_exp,
                                       img_slice, grid_slice,
                                       offset, mesh, mesh, topl,
                                       dimension, cutoff, a, b,
                                       ri_frac, rij_frac, cache);
    if (data_size == 0) {
        return 0;
    }
    cache += data_size;

    double *g3d = cache;
    double *buf = g3d + l1 * l1 * l1;
    cache = buf + _MAX_RR_SIZE[topl];
    _nonorth_ints(g3d, weights, fac, aij, topl, dimension, a, rij_frac,
                  mesh, img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);
    /* Rotate moments back to Cartesian components, then transfer angular
     * momentum from the bra to the ket (vertical recurrence). */
    _affine_trans(buf, g3d, a, floorl, topl, cache);
    _plain_vrr2d(out, buf, cache, li, lj, ri, rj);
    return 1;
}

/*
 * GGA integrals: value plus x/y/z gradient components.  The weights array is
 * expected to hold 4 consecutive grids (value, vx, vy, vz) of mesh size.
 * Returns 1 on success, 0 when the pair does not touch the grid.
 */
int NUMINTeval_gga_nonorth(double *weights, double *out, int comp,
                           int li, int lj, double ai, double aj,
                           double *ri, double *rj, double fac, double log_prec,
                           int dimension, double *a, double *b,
                           int *offset, int *submesh, int *mesh, double *cache)
{
    int floorl = MAX(li - 1, 0);
    int topl = li + 1 + lj;
    int l1 = topl + 1;
    double aij = ai + aj;
    double cutoff = gto_rcut(aij, topl, fac, log_prec);
    int img_slice[6];
    int grid_slice[6];
    double ri_frac[3];
    double rij_frac[3];
    double *xs_exp, *ys_exp, *zs_exp;
    _make_rij_frac(ri_frac, rij_frac, ri, rj, ai, aj, a, b);

    /* NOTE(review): same `offset, mesh, mesh` argument pattern as the LDA
     * variant — `submesh` is unused; verify intent. */
    int data_size = _init_nonorth_data(&xs_exp, &ys_exp, &zs_exp,
                                       img_slice, grid_slice,
                                       offset, mesh, mesh, topl,
                                       dimension, cutoff, a, b,
                                       ri_frac, rij_frac, cache);
    if (data_size == 0) {
        return 0;
    }
    cache += data_size;

    int dj = _LEN_CART[lj];
    double *g3d = cache;
    double *buf = g3d + l1 * l1 * l1;
    double *out_up = cache;
    double
*out_down = out_up + _LEN_CART[li+1] * dj;
    cache = buf + _MAX_RR_SIZE[topl];

    size_t ngrids = ((size_t)mesh[0]) * mesh[1] * mesh[2];
    double *vx = weights + ngrids;
    double *vy = vx + ngrids;
    double *vz = vy + ngrids;

    /* Plain value term (total momentum li+lj only). */
    _nonorth_ints(g3d, weights, fac, aij, li+lj, dimension, a, rij_frac,
                  mesh, img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);
    _affine_trans(buf, g3d, a, li, li+lj, cache);
    _plain_vrr2d(out, buf, cache, li, lj, ri, rj);

    /* Gradient terms: each needs li±1 components, hence floorl..topl. */
    _nonorth_ints(g3d, vx, fac, aij, topl, dimension, a, rij_frac,
                  mesh, img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);
    _affine_trans(buf, g3d, a, floorl, topl, cache);
    _plain_vrr2d_updown(out_up, out_down, buf, cache, li, lj, ri, rj);
    _rr_nablax_i(out, out_up, out_down, li, lj, ai);

    _nonorth_ints(g3d, vy, fac, aij, topl, dimension, a, rij_frac,
                  mesh, img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);
    _affine_trans(buf, g3d, a, floorl, topl, cache);
    _plain_vrr2d_updown(out_up, out_down, buf, cache, li, lj, ri, rj);
    _rr_nablay_i(out, out_up, out_down, li, lj, ai);

    _nonorth_ints(g3d, vz, fac, aij, topl, dimension, a, rij_frac,
                  mesh, img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);
    _affine_trans(buf, g3d, a, floorl, topl, cache);
    _plain_vrr2d_updown(out_up, out_down, buf, cache, li, lj, ri, rj);
    _rr_nablaz_i(out, out_up, out_down, li, lj, ai);
    return 1;
}

/*
 * Evaluate one shell-pair's integrals through eval_ints and accumulate them
 * into the (naoi x naoj x comp) matrix `mat`.  Pairs whose Gaussian overlap
 * factor is negligible (eij > EIJCUTOFF or fac < env[PTR_EXPDROP]) are
 * skipped without touching `mat`.
 */
static void _apply_ints(int (*eval_ints)(), double *weights, double *mat,
                        size_t *dims, int comp, double fac, double log_prec,
                        int dimension, double *a, double *b,
                        int *offset, int *submesh, int *mesh, int *shls,
                        int *atm, int *bas, double *env, double *cache)
{
    int i_sh = shls[0];
    int j_sh = shls[1];
    int li = bas(ANG_OF, i_sh);
    int lj = bas(ANG_OF, j_sh);
    double *ri = env + atm(PTR_COORD, bas(ATOM_OF, i_sh));
    double *rj = env + atm(PTR_COORD, bas(ATOM_OF, j_sh));
    double ai = env[bas(PTR_EXP, i_sh)];
    double aj = env[bas(PTR_EXP, j_sh)];
    double ci = env[bas(PTR_COEFF, i_sh)];
    double cj = env[bas(PTR_COEFF, j_sh)];
    double aij = ai + aj;
    double rrij = CINTsquare_dist(ri, rj);
double eij = (ai * aj / aij) * rrij;
    if (eij > EIJCUTOFF) {
        return;
    }
    fac *= exp(-eij) * ci * cj * CINTcommon_fac_sp(li) * CINTcommon_fac_sp(lj);
    if (fac < env[PTR_EXPDROP]) {
        return;
    }

    int di = _LEN_CART[li];
    int dj = _LEN_CART[lj];
    double *out = cache;
    cache += comp * di * dj;
    int value = (*eval_ints)(weights, out, comp, li, lj, ai, aj, ri, rj,
                             fac, log_prec, dimension, a, b,
                             offset, submesh, mesh, cache);
    if (value != 0) {
        size_t naoi = dims[0];
        size_t naoj = dims[1];
        int i, j, ic;
        for (ic = 0; ic < comp; ic++) {
            for (j = 0; j < dj; j++) {
            for (i = 0; i < di; i++) {
                mat[j*naoi+i] += out[j*di+i];
            } }
            mat += naoi * naoj;
            out += di * dj;
        }
    }
}

/*
 * Upper bound (in doubles) on the scratch space one shell pair of angular
 * momentum l needs in the non-orthogonal integral path, including one
 * derivative order.
 */
static int _nonorth_cache_size(int *mesh, int l)
{
    int dcart = _LEN_CART[l];
    int deriv = 1;
    int topl = l + l + deriv;
    int l1 = topl + 1;
    const int nimgs = 1;
    int cache_size = 0;
    cache_size += l1 * (mesh[0] + mesh[1] + mesh[2]) * nimgs;  // xs/ys/zs_exp
    cache_size += mesh[1] * mesh[2]; // * nimgs * nimgs          weight_yz
    cache_size += l1 * mesh[2] * nimgs;                       // weight_z
    cache_size += l1 * l1 * mesh[0];                          // weight_x
    cache_size = MAX(cache_size, _MAX_AFFINE_SIZE[topl]*2);   // affine scratch
    cache_size += l1 * l1 * l1;                               // g3d
    cache_size += _MAX_RR_SIZE[topl];                         // vrr buffer
    return dcart*dcart + cache_size;
}

/*
 * Maximum of fsize(mesh, l) over the angular momenta in shls_slice, plus a
 * fixed safety margin.
 */
static int _max_cache_size(int (*fsize)(), int *shls_slice, int *bas, int *mesh)
{
    int i, n;
    int i0 = MIN(shls_slice[0], shls_slice[2]);
    int i1 = MAX(shls_slice[1], shls_slice[3]);
    int cache_size = 0;
    for (i = i0; i < i1; i++) {
        n = (*fsize)(mesh, bas(ANG_OF, i));
        cache_size = MAX(cache_size, n);
    }
    return cache_size+1000000;
}

/* Translate one atom coordinate in env_loc by lattice vector Ls[iL]. */
static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL)
{
    env_loc[ptr+0] = env[ptr+0] + Ls[iL*3+0];
    env_loc[ptr+1] = env[ptr+1] + Ls[iL*3+1];
    env_loc[ptr+2] = env[ptr+2] + Ls[iL*3+2];
}

// Numerical integration for uncontracted Cartesian basis
// F_mat needs to be initialized as 0
void NUMINT_fill2c(int (*eval_ints)(), double *weights, double *F_mat,
                   int comp, int hermi, int *shls_slice, int *ao_loc,
                   double log_prec, int dimension, int nimgs, double *Ls,
                   double *a, double *b,
int *offset, int *submesh, int *mesh,
                   int *atm, int natm, int *bas, int nbas, double *env, int nenv)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const int nish = ish1 - ish0;
    const int njsh = jsh1 - jsh0;
    const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
    const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
    const int cache_size = _max_cache_size(_nonorth_cache_size,
                                           shls_slice, bas, mesh);
    if (dimension == 0) {
        nimgs = 1;
    }
#pragma omp parallel
{
    size_t ncij = comp * naoi * naoj;
    size_t nijsh = nish * njsh;
    size_t dims[] = {naoi, naoj};
    size_t ijm;
    int ish, jsh, ij, m, i0, j0;
    int shls[2];
    /* NOTE(review): malloc results are not checked for NULL. */
    double *cache = malloc(sizeof(double) * cache_size);
    double *env_loc = malloc(sizeof(double)*nenv);
    NPdcopy(env_loc, env, nenv);
    int ptrxyz;
#pragma omp for schedule(dynamic)
    for (ijm = 0; ijm < nimgs*nijsh; ijm++) {
        m = ijm / nijsh;
        ij = ijm % nijsh;
        ish = ij / njsh;
        jsh = ij % njsh;
        if (hermi != PLAIN && ish > jsh) {
            // fill up only upper triangle of F_mat
            continue;
        }
        ish += ish0;
        jsh += jsh0;
        shls[0] = ish;
        shls[1] = jsh;
        i0 = ao_loc[ish] - ao_loc[ish0];
        j0 = ao_loc[jsh] - ao_loc[jsh0];
        if (dimension != 0) {
            /* Shift |j> into lattice image m before evaluating. */
            ptrxyz = atm(PTR_COORD, bas(ATOM_OF,jsh));
            shift_bas(env_loc, env, Ls, ptrxyz, m);
        }
        _apply_ints(eval_ints, weights, F_mat+m*ncij+j0*naoi+i0, dims, comp,
                    1., log_prec, dimension, a, b, offset, submesh, mesh,
                    shls, atm, bas, env_loc, cache);
    }
    free(cache);
    free(env_loc);
}
}

/*************************************************
 *
 * rho
 *
 *************************************************/
void GTOreverse_vrr2d_ket_inc1(double *g01, double *g00,
                               double *rirj, int li, int lj);
/* (li,lj) => (li+lj,0) */
void GTOreverse_vrr2d_ket(double *g00, double *g01,
                          int li, int lj, double *ri, double *rj)
{
    int nmax = li + lj;
    double *out = g00;
    double *gswap, *pg00, *pg01;
    int row_01, col_01, row_00, col_00, row_g;
    int i, j, n;
    double rirj[3];
    rirj[0] = ri[0] - rj[0];
    rirj[1] = ri[1] - rj[1];
    rirj[2] = ri[2] -
rj[2];
    /* Peel one unit of ket angular momentum per pass, ping-ponging between
     * the g00 and g01 buffers; the final result must land back in `out`. */
    for (j = lj; j > 0; j--) {
        col_01 = _LEN_CART[j];
        col_00 = _LEN_CART[j-1];
        row_g = _CUM_LEN_CART[nmax+1-j] - _CUM_LEN_CART[li] + _LEN_CART[li];
        for (n = 0; n < row_g*col_00; n++) {
            g00[n] = 0;
        }
        pg00 = g00;
        pg01 = g01;
        for (i = li; i <= nmax-j; i++) {
            GTOreverse_vrr2d_ket_inc1(pg01, pg00, rirj, i, j);
            row_01 = _LEN_CART[i];
            row_00 = _LEN_CART[i];
            pg00 += row_00 * col_00;
            pg01 += row_01 * col_01;
        }
        gswap = g00;
        g00 = g01;
        g01 = gswap;
    }
    if (out != g01) {
        row_g = _CUM_LEN_CART[nmax] - _CUM_LEN_CART[li] + _LEN_CART[li];
        for (n = 0; n < row_g; n++) {
            out[n] = g01[n];
        }
    }
}

/*
 * Accumulate Cartesian components (shells floorl..topl) of dm_cart into the
 * dense l1^3 x/y/z-power cube dm_xyz (entry [lx][ly][lz]).
 */
static void _cart_to_xyz(double *dm_xyz, double *dm_cart,
                         int floorl, int topl, int l1)
{
    int l1l1 = l1 * l1;
    int l, lx, ly, lz, n;
    for (n = 0, l = floorl; l <= topl; l++) {
        for (lx = l; lx >= 0; lx--) {
        for (ly = l - lx; ly >= 0; ly--, n++) {
            lz = l - lx - ly;
            dm_xyz[lx*l1l1+ly*l1+lz] += dm_cart[n];
        } }
    }
}

/*
 * Scatter the x/y/z-power cube dm_xyz onto the real-space (sub)grid `rho`
 * for an ORTHOGONAL cell: three successive dgemm contractions with the 1D
 * tables zs/ys/xs_exp, zeroing the table ranges that fall outside the
 * visible grid window (nimg == 1) or inside the gap between wrapped image
 * segments (nimg == 2 without overlap).
 */
static void _orth_rho(double *rho, double *dm_xyz,
                      double fac, int topl,
                      int *offset, int *submesh, int *mesh,
                      int *img_slice, int *grid_slice,
                      double *xs_exp, double *ys_exp, double *zs_exp,
                      double *cache)
{
    int l1 = topl + 1;
    int l1l1 = l1 * l1;
    int nimgx0 = img_slice[0];
    int nimgx1 = img_slice[1];
    int nimgy0 = img_slice[2];
    int nimgy1 = img_slice[3];
    int nimgz0 = img_slice[4];
    int nimgz1 = img_slice[5];
    int nimgx = nimgx1 - nimgx0;
    int nimgy = nimgy1 - nimgy0;
    int nimgz = nimgz1 - nimgz0;
    int nx0 = MAX(grid_slice[0], offset[0]);
    int nx1 = MIN(grid_slice[1], offset[0]+submesh[0]);
    int ny0 = MAX(grid_slice[2], offset[1]);
    int ny1 = MIN(grid_slice[3], offset[1]+submesh[1]);
    int nz0 = MAX(grid_slice[4], offset[2]);
    int nz1 = MIN(grid_slice[5], offset[2]+submesh[2]);
    int ngridx = _num_grids_on_x(nimgx, nx0, nx1, mesh[0]);
    int ngridy = _num_grids_on_x(nimgy, ny0, ny1, mesh[1]);
    int ngridz = _num_grids_on_x(nimgz, nz0, nz1, mesh[2]);
    if (ngridx == 0 || ngridy == 0 || ngridz == 0) {
        return;
    }

    const char TRANS_N = 'N';
    const char TRANS_T = 'T';
    const double D0 = 0;
    const double D1 = 1;
    int xcols
= submesh[1] * submesh[2];
    double *xyr = cache;
    double *xqr = xyr + l1l1 * submesh[2];
    int i, l;

    /* z direction: zero the parts of zs_exp outside the visible window. */
    if (nimgz == 1) {
        for (l = 0; l <= topl; l++) {
            for (i = offset[2]; i < nz0; i++) {
                zs_exp[l*mesh[2]+i] = 0;
            }
            for (i = nz1; i < offset[2]+submesh[2]; i++) {
                zs_exp[l*mesh[2]+i] = 0;
            }
        }
    } else if (nimgz == 2 && !_has_overlap(nz0, nz1, mesh[2])) {
        for (l = 0; l <= topl; l++) {
            for (i = nz1; i < nz0; i++) {
                zs_exp[l*mesh[2]+i] = 0;
            }
        }
    }
    dgemm_(&TRANS_N, &TRANS_N, submesh+2, &l1l1, &l1,
           &fac, zs_exp+offset[2], mesh+2, dm_xyz, &l1,
           &D0, xyr, submesh+2);

    /* y direction. */
    if (nimgy == 1) {
        for (l = 0; l <= topl; l++) {
            for (i = 0; i < (ny0-offset[1])*submesh[2]; i++) {
                xqr[l*xcols+i] = 0;
            }
            for (i = (ny1-offset[1])*submesh[2]; i < xcols; i++) {
                xqr[l*xcols+i] = 0;
            }
            dgemm_(&TRANS_N, &TRANS_T, submesh+2, &ngridy, &l1,
                   &D1, xyr+l*l1*submesh[2], submesh+2, ys_exp+ny0, mesh+1,
                   &D0, xqr+l*xcols+(ny0-offset[1])*submesh[2], submesh+2);
        }
    } else if (nimgy == 2 && !_has_overlap(ny0, ny1, mesh[1])) {
        for (l = 0; l <= topl; l++) {
            /* Two disjoint wrapped segments: [offset, ny1) and [ny0, end). */
            ngridy = ny1 - offset[1];
            dgemm_(&TRANS_N, &TRANS_T, submesh+2, &ngridy, &l1,
                   &D1, xyr+l*l1*submesh[2], submesh+2, ys_exp+offset[1], mesh+1,
                   &D0, xqr+l*xcols, submesh+2);
            for (i = (ny1-offset[1])*submesh[2]; i < (ny0-offset[1])*submesh[2]; i++) {
                xqr[l*xcols+i] = 0;
            }
            ngridy = offset[1] + submesh[1] - ny0;
            dgemm_(&TRANS_N, &TRANS_T, submesh+2, &ngridy, &l1,
                   &D1, xyr+l*l1*submesh[2], submesh+2, ys_exp+ny0, mesh+1,
                   &D0, xqr+l*xcols+(ny0-offset[1])*submesh[2], submesh+2);
        }
    } else {
        for (l = 0; l <= topl; l++) {
            dgemm_(&TRANS_N, &TRANS_T, submesh+2, submesh+1, &l1,
                   &D1, xyr+l*l1*submesh[2], submesh+2, ys_exp+offset[1], mesh+1,
                   &D0, xqr+l*xcols, submesh+2);
        }
    }

    /* x direction: accumulate (beta = 1) into rho. */
    if (nimgx == 1) {
        dgemm_(&TRANS_N, &TRANS_T, &xcols, &ngridx, &l1,
               &D1, xqr, &xcols, xs_exp+nx0, mesh,
               &D1, rho+(nx0-offset[0])*xcols, &xcols);
    } else if (nimgx == 2 && !_has_overlap(nx0, nx1, mesh[0])) {
        /* NOTE(review): `offset[2]` here looks like a copy-paste slip — the
         * analogous y branch uses ny1 - offset[1], so offset[0] is expected.
         * Harmless only when offset[2] == offset[0]; confirm upstream. */
        ngridx = nx1 - offset[2];
        dgemm_(&TRANS_N, &TRANS_T, &xcols, &ngridx, &l1,
               &D1, xqr, &xcols, xs_exp+offset[0], mesh,
               &D1, rho, &xcols);
        ngridx = offset[0] + submesh[0] - nx0;
        dgemm_(&TRANS_N, &TRANS_T, &xcols, &ngridx, &l1,
               &D1, xqr, &xcols, xs_exp+nx0, mesh,
               &D1, rho+(nx0-offset[0])*xcols, &xcols);
    } else {
        dgemm_(&TRANS_N, &TRANS_T, &xcols, submesh, &l1,
               &D1, xqr, &xcols, xs_exp+offset[0], mesh,
               &D1, rho, &xcols);
    }
}

/*
 * Copy the (di x dj) block of the density matrix into a compact buffer and
 * transfer ket angular momentum to the bra, producing dm_cart with shells
 * li..li+lj.  `cache` must hold di*dj doubles.
 */
static void _dm_vrr6d(double *dm_cart, double *dm, size_t naoi,
                      int li, int lj, double *ri, double *rj, double *cache)
{
    int di = _LEN_CART[li];
    int dj = _LEN_CART[lj];
    double *dm_6d = cache;
    int i, j;
    for (j = 0; j < dj; j++) {
        for (i = 0; i < di; i++) {
            dm_6d[j*di+i] = dm[j*naoi+i];
        }
    }
    GTOreverse_vrr2d_ket(dm_cart, dm_6d, li, lj, ri, rj);
}

/*
 * Accumulate the LDA density contribution of one shell pair onto the
 * orthogonal-cell grid `rho`.
 */
void NUMINTrho_lda_orth(double *rho, double *dm, int comp, size_t naoi,
                        int li, int lj, double ai, double aj,
                        double *ri, double *rj, double fac, double log_prec,
                        int dimension, double *a, double *b,
                        int *offset, int *submesh, int *mesh, double *cache)
{
    int topl = li + lj;
    int l1 = topl + 1;
    int l1l1l1 = l1 * l1 * l1;
    double cutoff = gto_rcut(ai+aj, topl, fac, log_prec);
    int img_slice[6];
    int grid_slice[6];
    double *xs_exp, *ys_exp, *zs_exp;
    int data_size = _init_orth_data(&xs_exp, &ys_exp, &zs_exp,
                                    img_slice, grid_slice,
                                    offset, submesh, mesh, topl, dimension,
                                    cutoff, ai, aj, ri, rj, a, b, cache);
    if (data_size == 0) {
        return;
    }
    cache += data_size;

    double *dm_xyz = cache;
    cache += l1l1l1;
    double *dm_cart = cache;
    double *dm_6d = dm_cart + _MAX_RR_SIZE[topl];

    _dm_vrr6d(dm_cart, dm, naoi, li, lj, ri, rj, dm_6d);
    NPdset0(dm_xyz, l1l1l1);
    _cart_to_xyz(dm_xyz, dm_cart, li, topl, l1);
    _orth_rho(rho, dm_xyz, fac, topl, offset, submesh, mesh,
              img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);
}

/*
 * Accumulate the GGA density contributions (value and x/y/z gradient
 * components) of one shell pair onto four consecutive subgrids starting at
 * `rho`.  The gradient pieces combine a raised (li+1, factor -2*ai) and,
 * when li > 0, a lowered (li-1, polynomial factor) bra component.
 */
void NUMINTrho_gga_orth(double *rho, double *dm, int comp, size_t naoi,
                        int li, int lj, double ai, double aj,
                        double *ri, double *rj, double fac, double log_prec,
                        int dimension, double *a, double *b,
                        int *offset, int *submesh, int *mesh, double *cache)
{
    int topl = li + 1 + lj;
    int l1 = topl + 1;
    int l1l1l1 = l1 * l1 * l1;
    double cutoff
= gto_rcut(ai+aj, topl, fac, log_prec);
    int img_slice[6];
    int grid_slice[6];
    double *xs_exp, *ys_exp, *zs_exp;
    int data_size = _init_orth_data(&xs_exp, &ys_exp, &zs_exp,
                                    img_slice, grid_slice,
                                    offset, submesh, mesh, topl, dimension,
                                    cutoff, ai, aj, ri, rj, a, b, cache);
    if (data_size == 0) {
        return;
    }
    cache += data_size;

    size_t ngrids = ((size_t)submesh[0]) * submesh[1] * submesh[2];
    double *rhox = rho + ngrids;
    double *rhoy = rhox + ngrids;
    double *rhoz = rhoy + ngrids;
    double *dm_xyz = cache;
    cache += l1l1l1;
    double *dm_cart = cache;
    double *dm_6d = dm_cart + _MAX_RR_SIZE[topl];
    int di = _LEN_CART[li];
    int dj = _LEN_CART[lj];
    int i, j, lx, ly, lz;

    /* Value term: uses only total momentum up to topl-1 = li+lj. */
    _dm_vrr6d(dm_cart, dm, naoi, li, lj, ri, rj, dm_6d);
    lx = l1 - 1;
    NPdset0(dm_xyz, lx * lx * lx);
    _cart_to_xyz(dm_xyz, dm_cart, li, topl-1, lx);
    _orth_rho(rho, dm_xyz, fac, li+lj, offset, submesh, mesh,
              img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);

    int di1 = _LEN_CART[li+1];
    int li_1 = li - 1;
    int di_1 = _LEN_CART[MAX(0, li_1)];
    double ai2 = -2 * ai;
    double fac_li;

    /* d/dx: raise bra momentum with weight -2*ai ... */
    NPdset0(dm_6d, di1*dj);
    for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
            dm_6d[di1*j+WHEREX_IF_L_INC1(i)] = dm[naoi*j+i] * ai2;
        }
    }
    GTOreverse_vrr2d_ket(dm_cart, dm_6d, li+1, lj, ri, rj);
    NPdset0(dm_xyz, l1l1l1);
    _cart_to_xyz(dm_xyz, dm_cart, li+1, topl, l1);
    /* ... plus the lowered component weighted by the x power (lx+1). */
    if (li_1 >= 0) {
        for (i = 0, lx = li_1; lx >= 0; lx--) {
            for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                fac_li = lx + 1;
                for (j = 0; j < dj; j++) {
                    dm_6d[di_1*j+i] = dm[naoi*j+WHEREX_IF_L_INC1(i)] * fac_li;
                }
            }
        }
        GTOreverse_vrr2d_ket(dm_cart, dm_6d, li_1, lj, ri, rj);
        _cart_to_xyz(dm_xyz, dm_cart, li_1, topl-2, l1);
    }
    _orth_rho(rhox, dm_xyz, fac, topl, offset, submesh, mesh,
              img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);

    /* d/dy. */
    NPdset0(dm_6d, _LEN_CART[li+1] * dj);
    for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
            dm_6d[di1*j+WHEREY_IF_L_INC1(i)] = dm[naoi*j+i] * ai2;
        }
    }
    GTOreverse_vrr2d_ket(dm_cart, dm_6d, li+1, lj, ri, rj);
    NPdset0(dm_xyz, l1l1l1);
    _cart_to_xyz(dm_xyz, dm_cart, li+1, topl, l1);
    if (li_1 >= 0) {
        for (i = 0, lx = li_1; lx >= 0; lx--) {
            for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                fac_li = ly + 1;
                for (j = 0; j < dj; j++) {
                    dm_6d[di_1*j+i] = dm[naoi*j+WHEREY_IF_L_INC1(i)] * fac_li;
                }
            }
        }
        GTOreverse_vrr2d_ket(dm_cart, dm_6d, li_1, lj, ri, rj);
        _cart_to_xyz(dm_xyz, dm_cart, li_1, topl-2, l1);
    }
    _orth_rho(rhoy, dm_xyz, fac, topl, offset, submesh, mesh,
              img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);

    /* d/dz. */
    NPdset0(dm_6d, _LEN_CART[li+1] * dj);
    for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
            dm_6d[di1*j+WHEREZ_IF_L_INC1(i)] = dm[naoi*j+i] * ai2;
        }
    }
    GTOreverse_vrr2d_ket(dm_cart, dm_6d, li+1, lj, ri, rj);
    NPdset0(dm_xyz, l1l1l1);
    _cart_to_xyz(dm_xyz, dm_cart, li+1, topl, l1);
    if (li_1 >= 0) {
        for (i = 0, lx = li_1; lx >= 0; lx--) {
            for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                lz = li_1 - lx - ly;
                fac_li = lz + 1;
                for (j = 0; j < dj; j++) {
                    dm_6d[di_1*j+i] = dm[naoi*j+WHEREZ_IF_L_INC1(i)] * fac_li;
                }
            }
        }
        GTOreverse_vrr2d_ket(dm_cart, dm_6d, li_1, lj, ri, rj);
        _cart_to_xyz(dm_xyz, dm_cart, li_1, topl-2, l1);
    }
    _orth_rho(rhoz, dm_xyz, fac, topl, offset, submesh, mesh,
              img_slice, grid_slice, xs_exp, ys_exp, zs_exp, cache);
}

/*
 * Adjoint of _nonorth_dot_z: scatter the per-grid values rhoz, weighted by
 * the running Gaussian recurrence, back onto the periodic rho column
 * (length meshz, indexed relative to `offset`).  Same outward two-sweep
 * structure and underflow handling as _nonorth_dot_z.
 */
static void _nonorth_rho_z(double *rho, double *rhoz,
                           int offset, int meshz, int nz0, int nz1,
                           int grid_close_to_zij,
                           double e_z0z0, double e_z0dz, double e_dzdz,
                           double _z0dz, double _dzdz)
{
    if (e_z0z0 == 0) {
        return;
    }

    double exp_2dzdz = e_dzdz * e_dzdz;
    double exp_z0z0, exp_z0dz;
    int iz, iz1;

    rho -= offset; // for the original indexing rho[iz1-offset]
    exp_z0z0 = e_z0z0;
    exp_z0dz = e_z0dz * e_dzdz;
    iz1 = grid_close_to_zij % meshz + meshz;
    for (iz = grid_close_to_zij-nz0; iz < nz1-nz0; iz++, iz1++) {
        if (iz1 >= meshz) {
            iz1 -= meshz;
        }
        rho[iz1] += rhoz[iz] * exp_z0z0;
        exp_z0z0 *= exp_z0dz;
        exp_z0dz *= exp_2dzdz;
    }

    exp_z0z0 = e_z0z0;
    if (e_z0dz != 0) {
        exp_z0dz = e_dzdz / e_z0dz;
    } else {
        exp_z0dz = exp(_dzdz - _z0dz);
    }
    iz1 = (grid_close_to_zij-1) % meshz;
    for (iz = grid_close_to_zij-nz0-1; iz >= 0; iz--, iz1--) {
if (iz1 < 0) {
                        iz1 += meshz;
                }
                exp_z0z0 *= exp_z0dz;
                exp_z0dz *= exp_2dzdz;
                rho[iz1] += rhoz[iz] * exp_z0z0;
        }
}

/* Same accumulation as _nonorth_rho_z, specialized for the single-image
 * case: the wrap check is hoisted out of the forward loop (only the initial
 * index can be out of range when exactly one periodic image is touched). */
static void _nonorth_rho_z_1img(double *rho, double *rhoz, int offset, int meshz,
                                int nz0, int nz1, int grid_close_to_zij,
                                double e_z0z0, double e_z0dz, double e_dzdz,
                                double _z0dz, double _dzdz)
{
        if (e_z0z0 == 0) {
                return;
        }
        double exp_2dzdz = e_dzdz * e_dzdz;
        double exp_z0z0, exp_z0dz;
        int iz, iz1;
        rho -= offset;  // for the original indexing rho[iz1-offset]

        exp_z0z0 = e_z0z0;
        exp_z0dz = e_z0dz * e_dzdz;
        iz1 = grid_close_to_zij % meshz;
        if (iz1 < 0) {
                iz1 += meshz;
        }
        for (iz = grid_close_to_zij-nz0; iz < nz1-nz0; iz++, iz1++) {
                rho[iz1] += rhoz[iz] * exp_z0z0;
                exp_z0z0 *= exp_z0dz;
                exp_z0dz *= exp_2dzdz;
        }

        exp_z0z0 = e_z0z0;
        if (e_z0dz != 0) {
                exp_z0dz = e_dzdz / e_z0dz;
        } else {
                exp_z0dz = exp(_dzdz - _z0dz);
        }
        iz1 = (grid_close_to_zij-1) % meshz;
        if (iz1 < 0) {
                iz1 += meshz;
        }
        for (iz = grid_close_to_zij-nz0-1; iz >= 0; iz--, iz1--) {
                exp_z0z0 *= exp_z0dz;
                exp_z0dz *= exp_2dzdz;
                rho[iz1] += rhoz[iz] * exp_z0z0;
        }
}

/* Same accumulation as _nonorth_rho_z, but grid points flagged in skip[]
 * (outside the local submesh) are not written; the exponential recurrence
 * still advances for every point so the weights stay in sync.
 * NOTE(review): submeshz is currently unused in the visible body. */
static void _nonorth_rho_z_with_mask(double *rho, double *rhoz, char *skip,
                                     int offset, int submeshz, int meshz,
                                     int nz0, int nz1, int grid_close_to_zij,
                                     double e_z0z0, double e_z0dz, double e_dzdz,
                                     double _z0dz, double _dzdz)
{
        if (e_z0z0 == 0) {
                return;
        }
        double exp_2dzdz = e_dzdz * e_dzdz;
        double exp_z0z0, exp_z0dz;
        int iz, iz1;
        rho -= offset;  // for the original indexing rho[iz1-offset]

        exp_z0z0 = e_z0z0;
        exp_z0dz = e_z0dz * e_dzdz;
        iz1 = grid_close_to_zij % meshz + meshz;
        for (iz = grid_close_to_zij-nz0; iz < nz1-nz0; iz++, iz1++) {
                if (iz1 >= meshz) {
                        iz1 -= meshz;
                }
                if (!skip[iz]) {
                        rho[iz1] += rhoz[iz] * exp_z0z0;
                }
                exp_z0z0 *= exp_z0dz;
                exp_z0dz *= exp_2dzdz;
        }

        exp_z0z0 = e_z0z0;
        if (e_z0dz != 0) {
                exp_z0dz = e_dzdz / e_z0dz;
        } else {
                exp_z0dz = exp(_dzdz - _z0dz);
        }
        iz1 = (grid_close_to_zij-1) % meshz;
        for (iz = grid_close_to_zij-nz0-1; iz >= 0; iz--, iz1--) {
                if (iz1 < 0) {
                        iz1 += meshz;
                }
                exp_z0z0 *= exp_z0dz;
                exp_z0dz *=
exp_2dzdz;
                if (!skip[iz]) {
                        rho[iz1] += rhoz[iz] * exp_z0z0;
                }
        }
}

/* Build skip[] marking which of the nx1-nx0 grid points (periodically
 * wrapped into [0, mesh)) fall OUTSIDE the window [offset, offset+submesh).
 * Returns 0 when no mask is needed (whole mesh, or the window already
 * covers the range), 1 when skip[] was filled and must be consulted. */
static int _make_grid_mask(char *skip, int nx0, int nx1, int mesh,
                           int offset, int submesh)
{
        if (offset == 0 && submesh == mesh) {
                // allows nimg > 1
                return 0;
        } else if (offset <= nx0 && nx1 <= offset+submesh) {
                // requires nimg == 1
                return 0;
        }
        int i, i1;
        i1 = nx0 % mesh + mesh;
        for (i = 0; i < nx1-nx0; i++, i1++) {
                if (i1 >= mesh) {
                        i1 -= mesh;
                }
                if (offset <= i1 && i1 < offset+submesh) {
                        skip[i] = 0;
                } else {
                        skip[i] = 1;
                }
        }
        return 1;
}

/* Scatter the polynomial density dm_xyz onto the real-space grid for a
 * non-orthogonal cell.  The x/y loops walk grid lines; the Gaussian factor
 * is advanced by running-product recurrences and the z accumulation is
 * delegated to the _nonorth_rho_z* helpers (body continues across chunks). */
static void _nonorth_rho(double *rho, double *dm_xyz, double fac, double aij,
                         int topl, int dimension, double *a, double *rij_frac,
                         double *xs_exp, double *ys_exp, double *zs_exp,
                         int *img_slice, int *grid_slice,
                         int *offset, int *submesh, int *mesh, double *cache)
{
        int l1 = topl + 1;
        int l1l1 = l1 * l1;
        int nx0 = grid_slice[0];
        int nx1 = grid_slice[1];
        int ny0 = grid_slice[2];
        int ny1 = grid_slice[3];
        int nz0 = grid_slice[4];
        int nz1 = grid_slice[5];
        int ngridx = nx1 - nx0;
        int ngridy = ny1 - ny0;
        int ngridz = nz1 - nz0;
        //int nimgx0 = img_slice[0];
        //int nimgx1 = img_slice[1];
        //int nimgy0 = img_slice[2];
        //int nimgy1 = img_slice[3];
        int nimgz0 = img_slice[4];
        int nimgz1 = img_slice[5];
        //int nimgx = nimgx1 - nimgx0;
        //int nimgy = nimgy1 - nimgy0;
        int nimgz = nimgz1 - nimgz0;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int inc1 = 1;
        /* aa = aij * a.a^T, written out component-wise below instead of the
         * commented-out dgemm call */
        // aa = einsum('ij,kj->ik', a, a)
        //double aa[9];
        //int n3 = 3;
        //dgemm_(&TRANS_T, &TRANS_N, &n3, &n3, &n3,
        //       &aij, a, &n3, a, &n3, &D0, aa, &n3);
        double aa_xx = aij * (a[0] * a[0] + a[1] * a[1] + a[2] * a[2]);
        double aa_xy = aij * (a[0] * a[3] + a[1] * a[4] + a[2] * a[5]);
        double aa_xz = aij * (a[0] * a[6] + a[1] * a[7] + a[2] * a[8]);
        double aa_yy = aij * (a[3] * a[3] + a[4] * a[4] + a[5] * a[5]);
        double aa_yz = aij * (a[3] * a[6] + a[4] * a[7] + a[5] * a[8]);
        double aa_zz = aij * (a[6] * a[6] + a[7] * a[7] + a[8] * a[8]);
        int ix, iy, ix1, iy1;
        double dx = 1.
/ mesh[0];
        double dy = 1. / mesh[1];
        double dz = 1. / mesh[2];
        /* nearest grid point to the pair center, clamped into the grid range */
        //int grid_close_to_xij = rint(rij_frac[0] * mesh[0]);
        int grid_close_to_yij = rint(rij_frac[1] * mesh[1]);
        int grid_close_to_zij = rint(rij_frac[2] * mesh[2]);
        //grid_close_to_xij = MIN(grid_close_to_xij, nx1);
        //grid_close_to_xij = MAX(grid_close_to_xij, nx0);
        grid_close_to_yij = MIN(grid_close_to_yij, ny1);
        grid_close_to_yij = MAX(grid_close_to_yij, ny0);
        grid_close_to_zij = MIN(grid_close_to_zij, nz1);
        grid_close_to_zij = MAX(grid_close_to_zij, nz0);

        double img0_x = 0;
        double img0_y = 0;
        double img0_z = 0;
        double base_x = img0_x;// + dx * grid_close_to_xij;
        double base_y = img0_y + dy * grid_close_to_yij;
        double base_z = img0_z + dz * grid_close_to_zij;
        double x0xij = base_x - rij_frac[0];
        double y0yij = base_y - rij_frac[1];
        double z0zij = base_z - rij_frac[2];

        /* per-step exponent increments for the Gaussian recurrences */
        double _dydy = -dy * dy * aa_yy;
        double _dzdz = -dz * dz * aa_zz;
        double _dydz = -dy * dz * aa_yz * 2;
        double exp_dydy = exp(_dydy);
        double exp_2dydy = exp_dydy * exp_dydy;
        double exp_dzdz = exp(_dzdz);
        double exp_dydz = exp(_dydz);
        double exp_dydz_i = (exp_dydz == 0) ? 0 : 1./exp_dydz;
        double x1xij, tmpx, tmpy, tmpz;
        double _xyz0xyz0, _xyz0dy, _xyz0dz, _z0dz;
        double exp_xyz0xyz0, exp_xyz0dz;
        double exp_y0dy, exp_z0z0, exp_z0dz;

        /* cache layout: xyr (l1l1 x ngridz), xqr (l1 x ngridy x ngridz),
         * rhoz (ngridz scratch line) */
        int xcols = ngridy * ngridz;
        double *xyr = cache;
        double *xqr = xyr + l1l1 * ngridz;
        double *rhoz = xqr + l1 * ngridy * ngridz;
        double *prho;
        int l;
        char x_skip[ngridx];
        char y_skip[ngridy];
        char z_skip[ngridz];
        int with_x_mask = _make_grid_mask(x_skip, nx0, nx1, mesh[0], offset[0], submesh[0]);
        int with_y_mask = _make_grid_mask(y_skip, ny0, ny1, mesh[1], offset[1], submesh[1]);
        int with_z_mask = _make_grid_mask(z_skip, nz0, nz1, mesh[2], offset[2], submesh[2]);

        /* contract dm_xyz with the z then y exponential tables via BLAS */
        dgemm_(&TRANS_N, &TRANS_N, &ngridz, &l1l1, &l1,
               &D1, zs_exp, &ngridz, dm_xyz, &l1, &D0, xyr, &ngridz);
        for (l = 0; l <= topl; l++) {
                dgemm_(&TRANS_N, &TRANS_T, &ngridz, &ngridy, &l1,
                       &D1, xyr+l*l1*ngridz, &ngridz, ys_exp, &ngridy,
                       &D0, xqr+l*xcols, &ngridz);
        }

        ix1 = nx0 % mesh[0] + mesh[0];
        for (ix = 0; ix < nx1-nx0; ix++, ix1++) {
                if (ix1 >= mesh[0]) {
                        ix1 -= mesh[0];
                }
                if (with_x_mask && x_skip[ix]) {
                        continue;
                }
                x1xij = x0xij + (nx0+ix)*dx;
                tmpx = x1xij * aa_xx + y0yij * aa_xy + z0zij * aa_xz;
                tmpy = x1xij * aa_xy + y0yij * aa_yy + z0zij * aa_yz;
                tmpz = x1xij * aa_xz + y0yij * aa_yz + z0zij * aa_zz;
                _xyz0xyz0 = -x1xij * tmpx - y0yij * tmpy - z0zij * tmpz;
                /* exponent too small: the whole yz-plane is negligible */
                if (_xyz0xyz0 < EXPMIN) {
                        continue;
                }
                _xyz0dy = -2 * dy * tmpy;
                _xyz0dz = -2 * dz * tmpz;
                exp_xyz0xyz0 = fac * exp(_xyz0xyz0);
                exp_xyz0dz = exp(_xyz0dz);
                //exp_xyz0dy = exp(_xyz0dy);
                //exp_y0dy = exp_xyz0dy * exp_dydy;
                exp_y0dy = exp(_xyz0dy + _dydy);

                /* forward y sweep from the center line */
                exp_z0z0 = exp_xyz0xyz0;
                exp_z0dz = exp_xyz0dz;
                _z0dz = _xyz0dz;
                iy1 = grid_close_to_yij % mesh[1] + mesh[1];
                for (iy = grid_close_to_yij-ny0; iy < ny1-ny0; iy++, iy1++) {
                        if (exp_z0z0 == 0) {
                                break;
                        }
                        if (iy1 >= mesh[1]) {
                                iy1 -= mesh[1];
                        }
                        if (!with_y_mask || !y_skip[iy]) {
                                dgemm_(&TRANS_N, &TRANS_T, &ngridz, &inc1, &l1,
                                       &D1, xqr+iy*ngridz, &xcols, xs_exp+ix, &ngridx,
                                       &D0, rhoz, &ngridz);
                                prho = rho + ((ix1-offset[0])*submesh[1] + iy1-offset[1])
* submesh[2];
                                /* dispatch on image count / mask need */
                                if (nimgz == 1) {
                                        _nonorth_rho_z_1img(prho, rhoz, offset[2], mesh[2],
                                                            nz0, nz1, grid_close_to_zij,
                                                            exp_z0z0, exp_z0dz, exp_dzdz,
                                                            _z0dz, _dzdz);
                                } else if (with_z_mask) {
                                        _nonorth_rho_z_with_mask(prho, rhoz, z_skip,
                                                                 offset[2], submesh[2], mesh[2],
                                                                 nz0, nz1, grid_close_to_zij,
                                                                 exp_z0z0, exp_z0dz, exp_dzdz,
                                                                 _z0dz, _dzdz);
                                } else {
                                        _nonorth_rho_z(prho, rhoz, offset[2], mesh[2],
                                                       nz0, nz1, grid_close_to_zij,
                                                       exp_z0z0, exp_z0dz, exp_dzdz,
                                                       _z0dz, _dzdz);
                                }
                        }
                        /* advance the y recurrence (order matters) */
                        _z0dz += _dydz;
                        exp_z0z0 *= exp_y0dy;
                        exp_z0dz *= exp_dydz;
                        exp_y0dy *= exp_2dydy;
                }

                /* backward y sweep */
                exp_y0dy = exp(_dydy - _xyz0dy);
                exp_z0z0 = exp_xyz0xyz0;
                exp_z0dz = exp_xyz0dz;
                _z0dz = _xyz0dz;
                iy1 = (grid_close_to_yij-1) % mesh[1];
                for (iy = grid_close_to_yij-ny0-1; iy >= 0; iy--, iy1--) {
                        exp_z0z0 *= exp_y0dy;
                        if (exp_z0z0 == 0) {
                                break;
                        }
                        _z0dz -= _dydz;
                        /* fall back to exp() when the ratio underflowed */
                        if (exp_dydz != 0) {
                                exp_z0dz *= exp_dydz_i;
                        } else {
                                exp_z0dz = exp(_z0dz);
                        }
                        exp_y0dy *= exp_2dydy;
                        if (iy1 < 0) {
                                iy1 += mesh[1];
                        }
                        if (!with_y_mask || !y_skip[iy]) {
                                dgemm_(&TRANS_N, &TRANS_T, &ngridz, &inc1, &l1,
                                       &D1, xqr+iy*ngridz, &xcols, xs_exp+ix, &ngridx,
                                       &D0, rhoz, &ngridz);
                                prho = rho + ((ix1-offset[0])*submesh[1] + iy1-offset[1])
                                           * submesh[2];
                                if (nimgz == 1) {
                                        _nonorth_rho_z_1img(prho, rhoz, offset[2], mesh[2],
                                                            nz0, nz1, grid_close_to_zij,
                                                            exp_z0z0, exp_z0dz, exp_dzdz,
                                                            _z0dz, _dzdz);
                                } else if (with_z_mask) {
                                        _nonorth_rho_z_with_mask(prho, rhoz, z_skip,
                                                                 offset[2], submesh[2], mesh[2],
                                                                 nz0, nz1, grid_close_to_zij,
                                                                 exp_z0z0, exp_z0dz, exp_dzdz,
                                                                 _z0dz, _dzdz);
                                } else {
                                        _nonorth_rho_z(prho, rhoz, offset[2], mesh[2],
                                                       nz0, nz1, grid_close_to_zij,
                                                       exp_z0z0, exp_z0dz, exp_dzdz,
                                                       _z0dz, _dzdz);
                                }
                        }
                }
        }
}

/* LDA density of one primitive shell pair on a non-orthogonal grid:
 * build the 6D Cartesian density block, transform it to the xyz polynomial
 * basis via the affine lattice transform, then scatter with _nonorth_rho. */
void NUMINTrho_lda_nonorth(double *rho, double *dm, int comp, size_t naoi,
                           int li, int lj, double ai, double aj,
                           double *ri, double *rj, double fac, double log_prec,
                           int dimension, double *a, double *b,
                           int *offset, int *submesh, int *mesh, double *cache)
{
        int floorl = li;
        int topl = li + lj;
        int l1 = topl + 1;
        double aij = ai + aj;
        double cutoff =
gto_rcut(aij, topl, fac, log_prec);
        int img_slice[6];
        int grid_slice[6];
        double ri_frac[3];
        double rij_frac[3];
        double *xs_exp, *ys_exp, *zs_exp;
        _make_rij_frac(ri_frac, rij_frac, ri, rj, ai, aj, a, b);
        int data_size = _init_nonorth_data(&xs_exp, &ys_exp, &zs_exp,
                                           img_slice, grid_slice,
                                           offset, submesh, mesh,
                                           topl, dimension, cutoff,
                                           a, b, ri_frac, rij_frac, cache);
        /* shell pair does not touch the local grid */
        if (data_size == 0) {
                return;
        }
        cache += data_size;

        double *dm_xyz = cache;
        cache += l1 * l1 * l1;
        double *dm_cart = cache;
        double *dm_cache = dm_cart + _CUM_LEN_CART[topl];
        _dm_vrr6d(dm_cart, dm, naoi, li, lj, ri, rj,
                  dm_cart+_MAX_RR_SIZE[topl]);
        _reverse_affine_trans(dm_xyz, dm_cart, a, floorl, topl, dm_cache);
        _nonorth_rho(rho, dm_xyz, fac, aij, topl, dimension, a, rij_frac,
                     xs_exp, ys_exp, zs_exp, img_slice, grid_slice,
                     offset, submesh, mesh, cache);
}

/* Add the (l1-2)^3 block dm_xyz1 into the top-left corner of the l1^3
 * block dm_xyz (merging the lowered-l term into the raised-l polynomial). */
static void _merge_dm_xyz_updown(double *dm_xyz, double *dm_xyz1, int l1)
{
        int l0 = l1 - 2;
        int l1l1 = l1 * l1;
        int l0l0 = l0 * l0;
        int i, j, k;
        for (i = 0; i < l0; i++) {
        for (j = 0; j < l0; j++) {
        for (k = 0; k < l0; k++) {
                dm_xyz[i*l1l1+j*l1+k] += dm_xyz1[i*l0l0+j*l0+k];
        } } }
}

/* GGA density (rho and its x/y/z gradient components) of one primitive
 * shell pair on a non-orthogonal grid.  rho holds 4 contiguous grids:
 * value, d/dx, d/dy, d/dz (body continues across chunks). */
void NUMINTrho_gga_nonorth(double *rho, double *dm, int comp, size_t naoi,
                           int li, int lj, double ai, double aj,
                           double *ri, double *rj, double fac, double log_prec,
                           int dimension, double *a, double *b,
                           int *offset, int *submesh, int *mesh, double *cache)
{
        int topl = li + 1 + lj;
        int l1 = topl + 1;
        int l1l1 = l1 * l1;
        double aij = ai + aj;
        double cutoff = gto_rcut(aij, topl, fac, log_prec);
        int img_slice[6];
        int grid_slice[6];
        double ri_frac[3];
        double rij_frac[3];
        double *xs_exp, *ys_exp, *zs_exp;
        _make_rij_frac(ri_frac, rij_frac, ri, rj, ai, aj, a, b);
        int data_size = _init_nonorth_data(&xs_exp, &ys_exp, &zs_exp,
                                           img_slice, grid_slice,
                                           offset, submesh, mesh,
                                           topl, dimension, cutoff,
                                           a, b, ri_frac, rij_frac, cache);
        if (data_size == 0) {
                return;
        }
        cache += data_size;

        size_t ngrids = ((size_t)submesh[0]) * submesh[1] * submesh[2];
        double *rhox = rho + ngrids;
        double *rhoy
= rhox + ngrids;
        double *rhoz = rhoy + ngrids;
        double *dm_xyz = cache;
        double *dm_xyz1 = dm_xyz + l1l1 * l1;
        cache += l1l1 * l1 * 2;
        double *dm_cart = cache;
        double *dm_6d = dm_cart + _MAX_RR_SIZE[topl];
        int di = _LEN_CART[li];
        int dj = _LEN_CART[lj];
        int i, j, lx, ly, lz;
        _dm_vrr6d(dm_cart, dm, naoi, li, lj, ri, rj, dm_6d);
        lx = l1 - 1;
        /* plain density */
        _reverse_affine_trans(dm_xyz, dm_cart, a, li, li+lj, dm_6d);
        _nonorth_rho(rho, dm_xyz, fac, aij, li+lj, dimension, a, rij_frac,
                     xs_exp, ys_exp, zs_exp, img_slice, grid_slice,
                     offset, submesh, mesh, cache);

        int di1 = _LEN_CART[li+1];
        int li_1 = li - 1;
        int di_1 = _LEN_CART[MAX(0, li_1)];
        double ai2 = -2 * ai;
        double fac_li;

        /* d/dx component: raised-l term scaled by ai2 (= -2*ai) ... */
        NPdset0(dm_6d, _LEN_CART[li+1] * dj);
        for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
                dm_6d[di1*j+WHEREX_IF_L_INC1(i)] = dm[naoi*j+i] * ai2;
        } }
        GTOreverse_vrr2d_ket(dm_cart, dm_6d, li+1, lj, ri, rj);
        _reverse_affine_trans(dm_xyz, dm_cart, a, li+1, topl, dm_6d);
        /* ... plus, for li >= 1, the lowered-l term scaled by (lx+1) */
        if (li_1 >= 0) {
                for (i = 0, lx = li_1; lx >= 0; lx--) {
                for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                        fac_li = lx + 1;
                        for (j = 0; j < dj; j++) {
                                dm_6d[di_1*j+i] = dm[naoi*j+WHEREX_IF_L_INC1(i)] * fac_li;
                        }
                } }
                GTOreverse_vrr2d_ket(dm_cart, dm_6d, li_1, lj, ri, rj);
                _reverse_affine_trans(dm_xyz1, dm_cart, a, li_1, topl-2, dm_6d);
                _merge_dm_xyz_updown(dm_xyz, dm_xyz1, l1);
        }
        _nonorth_rho(rhox, dm_xyz, fac, aij, topl, dimension, a, rij_frac,
                     xs_exp, ys_exp, zs_exp, img_slice, grid_slice,
                     offset, submesh, mesh, cache);

        /* d/dy component, same structure with factor (ly+1) */
        NPdset0(dm_6d, _LEN_CART[li+1] * dj);
        for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
                dm_6d[di1*j+WHEREY_IF_L_INC1(i)] = dm[naoi*j+i] * ai2;
        } }
        GTOreverse_vrr2d_ket(dm_cart, dm_6d, li+1, lj, ri, rj);
        _reverse_affine_trans(dm_xyz, dm_cart, a, li+1, topl, dm_6d);
        if (li_1 >= 0) {
                for (i = 0, lx = li_1; lx >= 0; lx--) {
                for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                        fac_li = ly + 1;
                        for (j = 0; j < dj; j++) {
                                dm_6d[di_1*j+i] = dm[naoi*j+WHEREY_IF_L_INC1(i)] * fac_li;
                        }
                } }
                GTOreverse_vrr2d_ket(dm_cart, dm_6d, li_1, lj, ri, rj);
                _reverse_affine_trans(dm_xyz1, dm_cart, a, li_1, topl-2, dm_6d);
                _merge_dm_xyz_updown(dm_xyz, dm_xyz1, l1);
        }
        _nonorth_rho(rhoy, dm_xyz, fac, aij, topl, dimension, a, rij_frac,
                     xs_exp, ys_exp, zs_exp, img_slice, grid_slice,
                     offset, submesh, mesh, cache);

        /* d/dz component, factor (lz+1) */
        NPdset0(dm_6d, _LEN_CART[li+1] * dj);
        for (i = 0; i < di; i++) {
        for (j = 0; j < dj; j++) {
                dm_6d[di1*j+WHEREZ_IF_L_INC1(i)] = dm[naoi*j+i] * ai2;
        } }
        GTOreverse_vrr2d_ket(dm_cart, dm_6d, li+1, lj, ri, rj);
        _reverse_affine_trans(dm_xyz, dm_cart, a, li+1, topl, dm_6d);
        if (li_1 >= 0) {
                for (i = 0, lx = li_1; lx >= 0; lx--) {
                for (ly = li_1 - lx; ly >= 0; ly--, i++) {
                        lz = li_1 - lx - ly;
                        fac_li = lz + 1;
                        for (j = 0; j < dj; j++) {
                                dm_6d[di_1*j+i] = dm[naoi*j+WHEREZ_IF_L_INC1(i)] * fac_li;
                        }
                } }
                GTOreverse_vrr2d_ket(dm_cart, dm_6d, li_1, lj, ri, rj);
                _reverse_affine_trans(dm_xyz1, dm_cart, a, li_1, topl-2, dm_6d);
                _merge_dm_xyz_updown(dm_xyz, dm_xyz1, l1);
        }
        _nonorth_rho(rhoz, dm_xyz, fac, aij, topl, dimension, a, rij_frac,
                     xs_exp, ys_exp, zs_exp, img_slice, grid_slice,
                     offset, submesh, mesh, cache);
}

/* Evaluate one shell pair's contribution to rho via eval_rho, after
 * screening: the pair is skipped when the Gaussian overlap exponent exceeds
 * EIJCUTOFF or the combined prefactor falls below env[PTR_EXPDROP]. */
static void _apply_rho(void (*eval_rho)(), double *rho, double *dm,
                       size_t *dims, int comp, double log_prec,
                       int dimension, double *a, double *b,
                       int *offset, int *submesh, int *mesh,
                       int *shls, int *atm, int natm,
                       int *bas, int nbas, double *env, double *cache)
{
        const size_t naoi = dims[0];
        const int i_sh = shls[0];
        const int j_sh = shls[1];
        const int li = bas(ANG_OF, i_sh);
        const int lj = bas(ANG_OF, j_sh);
        double *ri = env + atm(PTR_COORD, bas(ATOM_OF, i_sh));
        double *rj = env + atm(PTR_COORD, bas(ATOM_OF, j_sh));
        double ai = env[bas(PTR_EXP, i_sh)];
        double aj = env[bas(PTR_EXP, j_sh)];
        double ci = env[bas(PTR_COEFF, i_sh)];
        double cj = env[bas(PTR_COEFF, j_sh)];
        double aij = ai + aj;
        double rrij = CINTsquare_dist(ri, rj);
        double eij = (ai * aj / aij) * rrij;
        if (eij > EIJCUTOFF) {
                return;
        }
        double fac = exp(-eij) * ci * cj * CINTcommon_fac_sp(li) * CINTcommon_fac_sp(lj);
        if (fac < env[PTR_EXPDROP]) {
                return;
        }
(*eval_rho)(rho, dm, comp, naoi, li, lj, ai, aj, ri, rj,
                    fac, log_prec, dimension, a, b,
                    offset, submesh, mesh, cache);
}

/* Upper bound (in doubles) on the per-thread scratch needed by the rho
 * kernels for max angular momentum l; the +1000000 is a safety margin. */
static int _rho_cache_size(int l, int comp, int *mesh)
{
        int l1 = l * 2 + 1;
        int cache_size = 0;
        cache_size += l1 * mesh[1] * mesh[2];
        cache_size += l1 * l1 * mesh[2] * 2;
        cache_size = MAX(cache_size, 3*_MAX_RR_SIZE[l*2]);
        cache_size = MAX(cache_size, _CUM_LEN_CART[l*2]+2*_MAX_AFFINE_SIZE[l*2]);
        cache_size += l1 * (mesh[0] + mesh[1] + mesh[2]);
        cache_size += l1 * l1 * l1;
        return cache_size + 1000000;
}

/*
 * F_dm are a set of uncontracted cartesian density matrices
 * Note rho is updated inplace.
 */
void NUMINT_rho_drv(void (*eval_rho)(), double *rho, double *F_dm,
                    int comp, int hermi, int *shls_slice, int *ao_loc,
                    double log_prec, int dimension, int nimgs, double *Ls,
                    double *a, double *b, int *offset, int *submesh, int *mesh,
                    int *atm, int natm, int *bas, int nbas, double *env,
                    int nenv)
{
        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        int jsh1 = shls_slice[3];
        int nish = ish1 - ish0;
        int njsh = jsh1 - jsh0;
        size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        size_t nao2 = naoi * naoi;
        int lmax = 0;
        int ib;
        for (ib = 0; ib < nbas; ib++) {
                lmax = MAX(lmax, bas(ANG_OF, ib));
        }
        int cache_size = _rho_cache_size(lmax, comp, submesh);
        size_t ngrids = ((size_t)submesh[0]) * submesh[1] * submesh[2];
        if (dimension == 0) {
                nimgs = 1;
        }
        double *rhobufs[MAX_THREADS];
#pragma omp parallel
{
        size_t ncij = naoi * naoj;
        size_t nijsh = nish * njsh;
        size_t dims[] = {naoi, naoj};
        size_t ijm;
        int ish, jsh, ij, m, i0, j0;
        int shls[2];
        /* NOTE(review): malloc/calloc results below are not checked before
         * use -- an allocation failure would crash here */
        double *cache = malloc(sizeof(double) * cache_size);
        double *env_loc = malloc(sizeof(double)*nenv);
        NPdcopy(env_loc, env, nenv);
        int ptrxyz;
        int thread_id = omp_get_thread_num();
        /* thread 0 accumulates directly into rho; other threads use private
         * buffers that are reduced into rho afterwards */
        double *rho_priv, *pdm;
        if (thread_id == 0) {
                rho_priv = rho;
        } else {
                rho_priv = calloc(comp*ngrids, sizeof(double));
        }
        rhobufs[thread_id] = rho_priv;
        if (hermi) {
// Note hermitian character of the density matrices can only be found by
// rearranging the repeated images:
// dmR - dmR[::-1].transpose(0,2,1) == 0
#pragma omp for schedule(static)
                for (m = 0; m < nimgs; m++) {
                        pdm = F_dm + m * nao2;
                        for (j0 = 1; j0 < naoi; j0++) {
                        for (i0 = 0; i0 < j0; i0++) {
                                pdm[j0*naoi+i0] *= 2;
                                pdm[i0*naoi+j0] = 0;
                        } }
                }
        }

#pragma omp for schedule(dynamic)
        for (ijm = 0; ijm < nimgs*nijsh; ijm++) {
                m = ijm / nijsh;
                ij = ijm % nijsh;
                ish = ij / njsh;
                jsh = ij % njsh;
                /* hermitian case: only the lower triangle is evaluated */
                if (hermi != PLAIN && ish > jsh) {
                        continue;
                }
                ish += ish0;
                jsh += jsh0;
                shls[0] = ish;
                shls[1] = jsh;
                i0 = ao_loc[ish] - ao_loc[ish0];
                j0 = ao_loc[jsh] - ao_loc[jsh0];
                if (dimension != 0) {
                        /* shift the bra atom by lattice image m */
                        ptrxyz = atm(PTR_COORD, bas(ATOM_OF,ish));
                        shift_bas(env_loc, env, Ls, ptrxyz, m);
                }
                _apply_rho(eval_rho, rho_priv, F_dm+m*ncij+j0*naoi+i0, dims,
                           comp, log_prec, dimension, a, b,
                           offset, submesh, mesh,
                           shls, atm, natm, bas, nbas, env_loc, cache);
        }
        NPomp_dsum_reduce_inplace(rhobufs, comp*ngrids);
        free(cache);
        free(env_loc);
        if (thread_id != 0) {
                free(rho_priv);
        }
}
}
par_strength.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * *****************************************************************************/ /* following should be in a header file */ #include "_hypre_parcsr_ls.h" #include "hypre_hopscotch_hash.h" /*==========================================================================*/ /*==========================================================================*/ /** Generates strength matrix Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. \item The routine returns the following: \begin{itemize} \item S - a ParCSR matrix representing the "strength matrix". This is used in the coarsening and interpolation routines. \end{itemize} \item The graph of the "strength matrix" for A is a subgraph of the graph of A, but requires nonsymmetric storage even if A is symmetric. This is because of the directional nature of the "strengh of dependence" notion (see below). Since we are using nonsymmetric storage for A right now, this is not a problem. If we ever add the ability to store A symmetrically, then we could store the strength graph as floats instead of doubles to save space. \item This routine currently "compresses" the strength matrix. We should consider the possibility of defining this matrix to have the same "nonzero structure" as A. To do this, we could use the same A\_i and A\_j arrays, and would need only define the S\_data array. There are several pros and cons to discuss. 
\end{itemize}

   Terminology:
   \begin{itemize}
   \item Ruge's terminology: A point is "strongly connected to" $j$, or
   "strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$.
   \item Here, we retain some of this terminology, but with a more
   generalized notion of "strength".  We also retain the "natural" graph
   notation for representing the directed graph of a matrix.  That is, the
   nonzero entry $a_ij$ is represented as: i --> j.  In the strength matrix,
   S, the entry $s_ij$ is also graphically denoted as above, and means both
   of the following:
   \begin{itemize}
   \item $i$ "depends on" $j$ with "strength" $s_ij$
   \item $j$ "influences" $i$ with "strength" $s_ij$
   \end{itemize}
   \end{itemize}

   {\bf Input files:}
   _hypre_parcsr_ls.h

   @return Error code.

   @param A [IN]
   coefficient matrix
   @param strength_threshold [IN]
   threshold parameter used to define strength
   @param max_row_sum [IN]
   parameter used to modify definition of strength for diagonal dominant matrices
   @param S_ptr [OUT]
   strength matrix

   @see */
/*--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSHost(hypre_ParCSRMatrix    *A,
                           HYPRE_Real             strength_threshold,
                           HYPRE_Real             max_row_sum,
                           HYPRE_Int              num_functions,
                           HYPRE_Int             *dof_func,
                           hypre_ParCSRMatrix   **S_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                 comm     = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle  *comm_handle;
   hypre_CSRMatrix         *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int               *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Real              *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix         *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int               *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Real              *A_offd_data = NULL;
   HYPRE_Int               *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int               *A_offd_j    = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt            *row_starts  = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int                num_variables   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt             global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int                num_nonzeros_diag;
   HYPRE_Int                num_nonzeros_offd = 0;
   HYPRE_Int                num_cols_offd = 0;
   hypre_ParCSRMatrix      *S;
   hypre_CSRMatrix         *S_diag;
   HYPRE_Int               *S_diag_i;
   HYPRE_Int               *S_diag_j;
   /* HYPRE_Real           *S_diag_data; */
   hypre_CSRMatrix         *S_offd;
   HYPRE_Int               *S_offd_i = NULL;
   HYPRE_Int               *S_offd_j = NULL;
   /* HYPRE_Real           *S_offd_data; */
   HYPRE_Real               diag, row_scale, row_sum;
   HYPRE_Int                i, jA, jS;
   HYPRE_Int                ierr = 0;
   HYPRE_Int               *dof_func_offd;
   HYPRE_Int                num_sends;
   HYPRE_Int               *int_buf_data;
   HYPRE_Int                index, start, j;
   HYPRE_Int               *prefix_sum_workspace;

   /*--------------------------------------------------------------
    * Compute a ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];

   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);
   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);

   S_diag_i = hypre_CSRMatrixI(S_diag);
   /* S_temp_*_j hold the uncompressed row pattern (-1 marks a weak entry);
    * the compressed pattern is written to S_diag_j/S_offd_j afterwards */
   HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);
   S_diag_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   HYPRE_Int *S_temp_offd_j = NULL;

   dof_func_offd = NULL;

   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      S_temp_offd_j = hypre_CSRMatrixJ(S_offd);
      HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
      if (num_functions > 1)
      {
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }

      S_offd_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);

      HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols_offd; i++)
      {
         col_map_offd_S[i] = col_map_offd_A[i];
      }
   }

   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST);

   /* give S same nonzero structure as A */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS)
#endif
   {
      HYPRE_Int start, stop;
      hypre_GetSimpleThreadPartition(&start, &stop, num_variables);
      /* per-thread running counts of strong entries; turned into global
       * row offsets by the prefix sum below */
      HYPRE_Int jS_diag = 0, jS_offd = 0;

      for (i = start; i < stop; i++)
      {
         S_diag_i[i] = jS_diag;
         if (num_cols_offd)
         {
            S_offd_i[i] = jS_offd;
         }

         diag = A_diag_data[A_diag_i[i]];

         /* compute scaling factor and row sum */
         row_scale = 0.0;
         row_sum = diag;
         if (num_functions > 1)
         {
            /* only couplings within the same function (unknown) count */
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[jA]])
                  {
                     row_scale = hypre_max(row_scale, A_diag_data[jA]);
                     row_sum += A_diag_data[jA];
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
                  {
                     row_scale = hypre_max(row_scale, A_offd_data[jA]);
                     row_sum += A_offd_data[jA];
                  }
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[jA]])
                  {
                     row_scale = hypre_min(row_scale, A_diag_data[jA]);
                     row_sum += A_diag_data[jA];
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
                  {
                     row_scale = hypre_min(row_scale, A_offd_data[jA]);
                     row_sum += A_offd_data[jA];
                  }
               }
            } /* diag >= 0 */
         } /* num_functions > 1 */
         else
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  row_scale = hypre_max(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  row_scale = hypre_max(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  row_scale = hypre_min(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  row_scale = hypre_min(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            } /* diag >= 0*/
         } /* num_functions <= 1 */

         jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1;
         jS_offd += A_offd_i[i + 1] - A_offd_i[i];

         /* compute row entries of S */
         S_temp_diag_j[A_diag_i[i]] = -1;
         if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
         {
            /* make all dependencies weak */
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               S_temp_diag_j[jA] = -1;
            }
            jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1);

            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               S_temp_offd_j[jA] = -1;
            }
            jS_offd -= A_offd_i[i + 1] - A_offd_i[i];
         }
         else
         {
            if (num_functions > 1)
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] <= strength_threshold * row_scale
                         || dof_func[i] != dof_func[A_diag_j[jA]])
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] <= strength_threshold * row_scale
                         || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               }
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] >= strength_threshold * row_scale
                         || dof_func[i] != dof_func[A_diag_j[jA]])
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] >= strength_threshold * row_scale
                         || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions > 1 */
            else
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] <= strength_threshold * row_scale)
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] <= strength_threshold * row_scale)
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               }
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] >= strength_threshold * row_scale)
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] >= strength_threshold * row_scale)
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions <= 1 */
         } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */
      } /* for each variable */

      hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables,
                            &jS_offd, S_offd_i + num_variables,
                            prefix_sum_workspace);

      /*--------------------------------------------------------------
       * "Compress" the strength matrix.
       *
       * NOTE: S has *NO DIAGONAL ELEMENT* on any row.  Caveat Emptor!
       *
       * NOTE: This "compression" section of code may be removed, and
       * coarsening will still be done correctly.  However, the routine
       * that builds interpolation would have to be modified first.
       *----------------------------------------------------------------*/
      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS] = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS] = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */
   } /* omp parallel */

   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;
   hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;
   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   /* S_temp_*_j were aliased into S via hypre_CSRMatrixJ and replaced above
    * by the compressed arrays, so they are freed here */
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/* ----------------------------------------------------------------------- */
/* Dispatch wrapper: routes to the device implementation when A's diag
 * lives in device memory (CUDA builds), otherwise to the host routine. */
HYPRE_Int
hypre_BoomerAMGCreateS(hypre_ParCSRMatrix    *A,
                       HYPRE_Real             strength_threshold,
                       HYPRE_Real             max_row_sum,
                       HYPRE_Int              num_functions,
                       HYPRE_Int             *dof_func,
                       hypre_ParCSRMatrix   **S_ptr)
{
#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPushRange("CreateS");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGCreateSDevice(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGCreateSHost(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr);
   }

#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPopRange();
#endif

   return ierr;
}

/*
----------------------------------------------------------------------- */ /* Create Strength matrix from CF marker array data. Provides a more general form to build S for specific nodes of the 'global' matrix (for example, F points or A_FF part), given the entire matrix. These nodes have the SMRK tag. Could possibly be merged with BoomerAMGCreateS() to yield a more general function. */ HYPRE_Int hypre_BoomerAMGCreateSFromCFMarker(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int *CF_marker, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int SMRK, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Int *dof_func_offd = NULL; HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jj, jA, jS; HYPRE_Int num_sends, start, j, index; HYPRE_Int *int_buf_data; HYPRE_Int ierr = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int 
*prefix_sum_workspace; HYPRE_Int my_id; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. *----------------------------------------------------------------*/ hypre_MPI_Comm_rank(comm, &my_id); num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); HYPRE_Int *S_temp_offd_j = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, 
HYPRE_MEMORY_HOST); } S_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) { col_map_offd_S[i] = col_map_offd_A[i]; } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = 
CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { if (CF_marker[i] == SMRK) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[jj])) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[A_offd_j[jA]])) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if 
(CF_marker[jj] == SMRK) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0*/ } /* num_functions <=1 */ /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* end diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; 
++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions <=1 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* CF_marker == SMRK */ else { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } } /* CF_marker != SMRK */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, 
S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ for (i = start; i < stop; i++) { S_diag_i[i] += jS_diag; S_offd_i[i] += jS_offd; jS = S_diag_i[i]; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_temp_diag_j[jA] > -1) { S_diag_j[jS] = S_temp_diag_j[jA]; jS++; } } jS = S_offd_i[i]; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_temp_offd_j[jA] > -1) { S_offd_j[jS] = S_temp_offd_j[jA]; jS++; } } } /* for each variable */ } /* omp parallel */ hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables]; hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables]; hypre_CSRMatrixJ(S_diag) = S_diag_j; hypre_CSRMatrixJ(S_offd) = S_offd_j; hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime(); #endif return (ierr); } /*==========================================================================*/ /*==========================================================================*/ /** Generates strength matrix Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. 
\item The routine returns the following:
\begin{itemize}
\item S - a ParCSR matrix representing the "strength matrix".  This is
used in the coarsening and interpolation routines.
\end{itemize}
\item The graph of the "strength matrix" for A is a subgraph of the
graph of A, but requires nonsymmetric storage even if A is
symmetric.  This is because of the directional nature of the
"strength of dependence" notion (see below).  Since we are using
nonsymmetric storage for A right now, this is not a problem.  If we
ever add the ability to store A symmetrically, then we could store
the strength graph as floats instead of doubles to save space.
\item This routine currently "compresses" the strength matrix.  We
should consider the possibility of defining this matrix to have the
same "nonzero structure" as A.  To do this, we could use the same
A\_i and A\_j arrays, and would need only define the S\_data array.
There are several pros and cons to discuss.
\end{itemize}

Terminology:
\begin{itemize}
\item Ruge's terminology: A point is "strongly connected to" $j$, or
"strongly depends on" $j$, if $|a_{ij}| \ge \theta \max_{l \ne j} |a_{il}|$.
\item Here, we retain some of this terminology, but with a more
generalized notion of "strength".  We also retain the "natural" graph
notation for representing the directed graph of a matrix.  That is,
the nonzero entry $a_{ij}$ is represented as: i --> j.  In the strength
matrix, S, the entry $s_{ij}$ is also graphically denoted as above,
and means both of the following:
\begin{itemize}
\item $i$ "depends on" $j$ with "strength" $s_{ij}$
\item $j$ "influences" $i$ with "strength" $s_{ij}$
\end{itemize}
\end{itemize}

{\bf Input files:}
_hypre_parcsr_ls.h

@return Error code.
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. 
* * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. *----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); S_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { 
hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /* give S same nonzero structure as A */ hypre_ParCSRMatrixCopy(A,S,0); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_variables; i++) { diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = fabs(diag); if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } /* compute row entries of S */ S_diag_j[A_diag_i[i]] = -1; /* reject diag entry */ if ( fabs(row_sum) < fabs(diag)*(2.0-max_row_sum) && max_row_sum < 1.0 ) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { 
S_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_offd_j[jA] = -1; } } else { if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_offd_j[jA] = -1; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale) { S_offd_j[jA] = -1; } } } } } /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. 
*----------------------------------------------------------------*/ /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_diag_j[jA] > -1) { S_diag_j[jS] = S_diag_j[jA]; jS++; } } } S_diag_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_diag) = jS; /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_offd_j[jA] > -1) { S_offd_j[jS] = S_offd_j[jA]; jS++; } } } S_offd_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_offd) = jS; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return (ierr); } /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int **col_offd_S_to_A_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommPkg *comm_pkg_S; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int *send_procs_A = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int *recv_procs_S; HYPRE_Int *recv_vec_starts_S; HYPRE_Int *send_procs_S; HYPRE_Int *send_map_starts_S; 
HYPRE_Int *send_map_elmts_S = NULL; HYPRE_BigInt *big_send_map_elmts_S = NULL; HYPRE_Int *col_offd_S_to_A; HYPRE_Int *S_marker; HYPRE_Int *send_change; HYPRE_Int *recv_change; HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_cols_offd_S; HYPRE_Int i, j, jcol; HYPRE_Int proc, cnt, proc_cnt, total_nz; HYPRE_BigInt first_row; HYPRE_Int ierr = 0; HYPRE_Int num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int num_sends_S; HYPRE_Int num_recvs_S; HYPRE_Int num_nonzeros; num_nonzeros = S_offd_i[num_variables]; S_marker = NULL; if (num_cols_offd_A) S_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_A; i++) S_marker[i] = -1; for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_marker[jcol] = 0; } proc = 0; proc_cnt = 0; cnt = 0; num_recvs_S = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (!S_marker[j]) { S_marker[j] = cnt; cnt++; proc = 1; } } if (proc) {num_recvs_S++; proc = 0;} } num_cols_offd_S = cnt; recv_change = NULL; recv_procs_S = NULL; send_change = NULL; if (col_map_offd_S) hypre_TFree(col_map_offd_S, HYPRE_MEMORY_HOST); col_map_offd_S = NULL; col_offd_S_to_A = NULL; if (num_recvs_A) recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST); if (num_sends_A) send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST); if (num_recvs_S) recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S, HYPRE_MEMORY_HOST); recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S+1, HYPRE_MEMORY_HOST); if (num_cols_offd_S) { col_map_offd_S = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); } if (num_cols_offd_S < num_cols_offd_A) { for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_offd_j[i] 
= S_marker[jcol]; } proc = 0; proc_cnt = 0; cnt = 0; recv_vec_starts_S[0] = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (S_marker[j] != -1) { col_map_offd_S[cnt] = col_map_offd_A[j]; col_offd_S_to_A[cnt++] = j; proc = 1; } } recv_change[i] = j-cnt-recv_vec_starts_A[i]+recv_vec_starts_S[proc_cnt]; if (proc) { recv_procs_S[proc_cnt++] = recv_procs_A[i]; recv_vec_starts_S[proc_cnt] = cnt; proc = 0; } } } else { for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { col_map_offd_S[j] = col_map_offd_A[j]; col_offd_S_to_A[j] = j; } recv_procs_S[i] = recv_procs_A[i]; recv_vec_starts_S[i] = recv_vec_starts_A[i]; } recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_sends_A+num_recvs_A, HYPRE_MEMORY_HOST); j=0; for (i=0; i < num_sends_A; i++) hypre_MPI_Irecv(&send_change[i],1,HYPRE_MPI_INT,send_procs_A[i], 0,comm,&requests[j++]); for (i=0; i < num_recvs_A; i++) hypre_MPI_Isend(&recv_change[i],1,HYPRE_MPI_INT,recv_procs_A[i], 0,comm,&requests[j++]); status = hypre_CTAlloc(hypre_MPI_Status, j, HYPRE_MEMORY_HOST); hypre_MPI_Waitall(j,requests,status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); num_sends_S = 0; total_nz = send_map_starts_A[num_sends_A]; for (i=0; i < num_sends_A; i++) { if (send_change[i]) { if ((send_map_starts_A[i+1]-send_map_starts_A[i]) > send_change[i]) num_sends_S++; } else num_sends_S++; total_nz -= send_change[i]; } send_procs_S = NULL; if (num_sends_S) send_procs_S = hypre_CTAlloc(HYPRE_Int, num_sends_S, HYPRE_MEMORY_HOST); send_map_starts_S = hypre_CTAlloc(HYPRE_Int, num_sends_S+1, HYPRE_MEMORY_HOST); send_map_elmts_S = NULL; if (total_nz) { send_map_elmts_S = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST); big_send_map_elmts_S = hypre_CTAlloc(HYPRE_BigInt, total_nz, HYPRE_MEMORY_HOST); } proc = 0; proc_cnt = 0; for (i=0; i < num_sends_A; i++) 
{ cnt = send_map_starts_A[i+1]-send_map_starts_A[i]-send_change[i]; if (cnt) { send_procs_S[proc_cnt++] = send_procs_A[i]; send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt-1]+cnt; } } comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_S) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S; hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S; hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S; hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S; comm_handle = hypre_ParCSRCommHandleCreate(22, comm_pkg_S, col_map_offd_S, big_send_map_elmts_S); hypre_ParCSRCommHandleDestroy(comm_handle); first_row = hypre_ParCSRMatrixFirstRowIndex(A); if (first_row) for (i=0; i < send_map_starts_S[num_sends_S]; i++) send_map_elmts_S[i] = (HYPRE_Int)(big_send_map_elmts_S[i]-first_row); hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S; hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S; hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S; hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(send_change, HYPRE_MEMORY_HOST); hypre_TFree(recv_change, HYPRE_MEMORY_HOST); *col_offd_S_to_A_ptr = col_offd_S_to_A; return ierr; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points * for second coarsening pass in aggressive coarsening (S*S+2S) *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg 
*comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); hypre_ParCSRMatrix *S2; HYPRE_BigInt *col_map_offd_C = NULL; hypre_CSRMatrix *C_diag; /*HYPRE_Int *C_diag_data = NULL;*/ HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j = NULL; hypre_CSRMatrix *C_offd; /*HYPRE_Int *C_offd_data=NULL;*/ HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j=NULL; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int S_ext_diag_size = 0; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Int *S_ext_offd_j = NULL; HYPRE_Int S_ext_offd_size = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *S_marker = NULL; HYPRE_Int *S_marker_offd = NULL; //HYPRE_Int *temp = NULL; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *map_S_to_C = NULL; HYPRE_Int num_sends = 0; HYPRE_Int num_recvs = 0; HYPRE_Int *send_map_starts; HYPRE_Int *tmp_send_map_starts = NULL; HYPRE_Int *send_map_elmts; HYPRE_Int *recv_vec_starts; HYPRE_Int *tmp_recv_vec_starts = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; HYPRE_BigInt *temp = NULL; HYPRE_Int i, j, k; HYPRE_Int i1, i2, i3; HYPRE_BigInt big_i1; HYPRE_Int jj1, jj2, jrow, j_cnt; /*HYPRE_Int cnt, cnt_offd, cnt_diag;*/ HYPRE_Int num_procs, my_id; HYPRE_Int index; /*HYPRE_Int value;*/ HYPRE_Int num_coarse; HYPRE_Int num_nonzeros; HYPRE_BigInt global_num_coarse; HYPRE_BigInt my_first_cpt, my_last_cpt; HYPRE_Int *S_int_i = NULL; HYPRE_BigInt *S_int_j = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt *S_ext_j = NULL; 
/*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *num_coarse_prefix_sum; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); num_coarse_prefix_sum = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Extract S_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = coarse_row_starts[0]; my_last_cpt = coarse_row_starts[1]-1; if (my_id == (num_procs -1)) global_num_coarse = coarse_row_starts[1]; hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = coarse_row_starts[my_id]; my_last_cpt = coarse_row_starts[my_id+1]-1; global_num_coarse = coarse_row_starts[num_procs]; #endif if (num_cols_offd_S) { CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); } HYPRE_Int *coarse_to_fine = NULL; if (num_cols_diag_S) { fine_to_coarse = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); coarse_to_fine = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); } /*HYPRE_Int num_coarse_prefix_sum[hypre_NumThreads() + 1];*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int num_coarse_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_diag_S); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) num_coarse_private++; } hypre_prefix_sum(&num_coarse_private, &num_coarse, num_coarse_prefix_sum); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = num_coarse_private; 
coarse_to_fine[num_coarse_private] = i; num_coarse_private++; } else { fine_to_coarse[i] = -1; } } } /* omp parallel */ if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); HYPRE_Int begin = send_map_starts[0]; HYPRE_Int end = send_map_starts[num_sends]; big_int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] + my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { int_buf_data[index - begin] = CF_marker[send_map_elmts[index]]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data, HYPRE_MEMORY_HOST); S_int_i = hypre_TAlloc(HYPRE_Int, end+1, HYPRE_MEMORY_HOST); S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs]+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * generate S_int_i through adding number of coarse row-elements of offd and diag * for corresponding rows. 
S_int_i[j+1] contains the number of coarse elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ S_int_i[0] = 0; num_nonzeros = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int index = 0; for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) index++; } for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) index++; } S_int_i[j - begin + 1] = index; num_nonzeros += S_int_i[j - begin + 1]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,&S_int_i[1],&S_ext_i[1]); if (num_nonzeros) S_int_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = 0; j_cnt = 0; for (i=0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { jrow = send_map_elmts[j]; for (k=S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) S_int_j[j_cnt++] = (HYPRE_BigInt)fine_to_coarse[S_diag_j[k]]+my_first_cpt; } for (k=S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]]; } } tmp_send_map_starts[i+1] = j_cnt; } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; 
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange S_ext_i[j+1] contains the number of coarse elements * of a row j ! * evaluate S_ext_i and compute num_nonzeros for S_ext *--------------------------------------------------------------------------*/ for (i=0; i < recv_vec_starts[num_recvs]; i++) S_ext_i[i+1] += S_ext_i[i]; num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]]; if (num_nonzeros) S_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_recv_vec_starts[0] = 0; for (i=0; i < num_recvs; i++) tmp_recv_vec_starts[i+1] = S_ext_i[recv_vec_starts[i+1]]; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; comm_handle = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,S_int_j,S_ext_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(S_int_i, HYPRE_MEMORY_HOST); hypre_TFree(S_int_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_BigInt *S_big_offd_j = NULL; S_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_diag_i[0] = 0; S_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i[0] = 0; hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, S_ext_i[num_cols_offd_S] + num_cols_offd_S, 16*hypre_NumThreads()); #pragma omp parallel private(i,j, big_i1) { HYPRE_Int S_ext_offd_size_private = 0; HYPRE_Int 
S_ext_diag_size_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { hypre_UnorderedBigIntSetPut(&found_set, fine_to_coarse_offd[i]); } for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_offd_size_private++; hypre_UnorderedBigIntSetPut(&found_set, big_i1); } else S_ext_diag_size_private++; } } hypre_prefix_sum_pair( &S_ext_diag_size_private, &S_ext_diag_size, &S_ext_offd_size_private, &S_ext_offd_size, prefix_sum_workspace); #pragma omp master { if (S_ext_diag_size) S_ext_diag_j = hypre_TAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { S_ext_offd_j = hypre_TAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_big_offd_j = hypre_TAlloc(HYPRE_BigInt, S_ext_offd_size, HYPRE_MEMORY_HOST); } } #pragma omp barrier for (i = i_begin; i < i_end; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_big_offd_j[S_ext_offd_size_private++] = big_i1; //S_ext_offd_j[S_ext_offd_size_private++] = big_i1; else S_ext_diag_j[S_ext_diag_size_private++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[i + 1] = S_ext_diag_size_private; S_ext_offd_i[i + 1] = S_ext_offd_size_private; } } // omp parallel temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]); //S_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]); hypre_TFree(S_ext_j, 
HYPRE_MEMORY_HOST); hypre_TFree(S_big_offd_j, HYPRE_MEMORY_HOST); if (num_cols_offd_C) hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int cnt_offd, cnt_diag, cnt, value; S_ext_diag_size = 0; S_ext_offd_size = 0; for (i=0; i < num_cols_offd_S; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt) S_ext_offd_size++; else S_ext_diag_size++; } } S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); } cnt_offd = 0; cnt_diag = 0; cnt = 0; HYPRE_Int num_coarse_offd = 0; for (i=0; i < num_cols_offd_S; i++) { if (CF_marker_offd[i] > 0) num_coarse_offd++; for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_ext_j[cnt_offd++] = big_i1; else S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[++cnt] = cnt_diag; S_ext_offd_i[cnt] = cnt_offd; } hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); cnt = 0; if (S_ext_offd_size || num_coarse_offd) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_coarse_offd, HYPRE_MEMORY_HOST); for (i=0; i < S_ext_offd_size; i++) temp[i] = S_ext_j[i]; cnt = S_ext_offd_size; for (i=0; i < num_cols_offd_S; i++) if (CF_marker_offd[i] > 0) temp[cnt++] = fine_to_coarse_offd[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; if (S_ext_offd_size || 
num_coarse_offd) hypre_TFree(temp, HYPRE_MEMORY_HOST); for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_C, S_ext_j[i], num_cols_offd_C); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_S) { map_S_to_C = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); HYPRE_BigInt cnt = 0; for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { cnt = hypre_BigLowerBound(col_map_offd_C + cnt, col_map_offd_C + num_cols_offd_C, fine_to_coarse_offd[i]) - col_map_offd_C; map_S_to_C[i] = cnt++; } else map_S_to_C[i] = -1; } } /* omp parallel */ } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif } /* num_procs > 1 */ /*----------------------------------------------------------------------- * Allocate and initialize some stuff. 
*-----------------------------------------------------------------------*/ HYPRE_Int *S_marker_array = NULL, *S_marker_offd_array = NULL; if (num_coarse) S_marker_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); if (num_cols_offd_C) S_marker_offd_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); HYPRE_Int *C_temp_offd_j_array = NULL; HYPRE_Int *C_temp_diag_j_array = NULL; HYPRE_Int *C_temp_offd_data_array = NULL; HYPRE_Int *C_temp_diag_data_array = NULL; if (num_paths > 1) { C_temp_diag_j_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_diag_data_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_data_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); } C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of S *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i1,i2,i3,jj1,jj2,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int i1_begin, i1_end; hypre_GetSimpleThreadPartition(&i1_begin, &i1_end, num_cols_diag_S); HYPRE_Int *C_temp_diag_j = NULL, *C_temp_offd_j = NULL; HYPRE_Int *C_temp_diag_data = NULL, *C_temp_offd_data = NULL; if (num_paths > 1) { C_temp_diag_j = C_temp_diag_j_array + num_coarse*my_thread_num; C_temp_offd_j = C_temp_offd_j_array + num_cols_offd_C*my_thread_num; C_temp_diag_data = C_temp_diag_data_array + num_coarse*my_thread_num; C_temp_offd_data = C_temp_offd_data_array + num_cols_offd_C*my_thread_num; } HYPRE_Int *S_marker = NULL, *S_marker_offd = NULL; if (num_coarse) S_marker = 
S_marker_array + num_coarse*my_thread_num; if (num_cols_offd_C) S_marker_offd = S_marker_offd_array + num_cols_offd_C*my_thread_num; for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } // These two counters are for before filtering by num_paths HYPRE_Int jj_count_diag = 0; HYPRE_Int jj_count_offd = 0; // These two counters are for after filtering by num_paths HYPRE_Int num_nonzeros_diag = 0; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int ic_begin = num_coarse_prefix_sum[my_thread_num]; HYPRE_Int ic_end = num_coarse_prefix_sum[my_thread_num + 1]; HYPRE_Int ic; if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if 
(S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - 
jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { ++num_nonzeros_diag; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { ++num_nonzeros_offd; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ hypre_prefix_sum_pair( &num_nonzeros_diag, &C_diag_i[num_coarse], &num_nonzeros_offd, &C_offd_i[num_coarse], prefix_sum_workspace); for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { if (C_diag_i[num_coarse]) { C_diag_j = hypre_TAlloc(HYPRE_Int, C_diag_i[num_coarse], HYPRE_MEMORY_HOST); } if 
(C_offd_i[num_coarse]) { C_offd_j = hypre_TAlloc(HYPRE_Int, C_offd_i[num_coarse], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ic_begin; ic < ic_end - 1; ic++) { if (C_diag_i[ic+1] == C_diag_i[ic] && C_offd_i[ic+1] == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; } if (ic_begin < ic_end) { C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; HYPRE_Int next_C_diag_i = prefix_sum_workspace[2*(my_thread_num + 1)]; HYPRE_Int next_C_offd_i = prefix_sum_workspace[2*(my_thread_num + 1) + 1]; if (next_C_diag_i == C_diag_i[ic] && next_C_offd_i == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; } if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < 
S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = i3; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = i3; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { jj_count_diag = num_nonzeros_diag; jj_count_offd = num_nonzeros_offd; for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if 
(S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = i3; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = i3; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { C_diag_j[num_nonzeros_diag++] = C_temp_diag_j[jj1 - jj_row_begin_diag]; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { C_offd_j[num_nonzeros_offd++] = C_temp_offd_j[jj1 - jj_row_begin_offd]; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ } /* omp parallel */ S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse, global_num_coarse, coarse_row_starts, coarse_row_starts, num_cols_offd_C, C_diag_i[num_coarse], C_offd_i[num_coarse]); hypre_ParCSRMatrixOwnsRowStarts(S2) = 0; C_diag = hypre_ParCSRMatrixDiag(S2); hypre_CSRMatrixI(C_diag) = C_diag_i; if (C_diag_i[num_coarse]) hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(S2); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(S2) = C_offd; if (num_cols_offd_C) { if (C_offd_i[num_coarse]) hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C; } /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(C_temp_diag_j_array, 
HYPRE_MEMORY_HOST);
   /* NOTE(review): the hypre_TFree call for C_temp_diag_j_array opens on the
      previous physical line and closes above.  Below: release all per-thread
      scratch buffers used while forming the S*S + 2S product. */
   hypre_TFree(C_temp_diag_data_array, HYPRE_MEMORY_HOST);
   hypre_TFree(C_temp_offd_j_array, HYPRE_MEMORY_HOST);
   hypre_TFree(C_temp_offd_data_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_offd_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_to_fine, HYPRE_MEMORY_HOST);
   if (S_ext_diag_size)
   {
      hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST);
   if (S_ext_offd_size)
   {
      hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_S)
   {
      hypre_TFree(map_S_to_C, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   }

   /* The result matrix was assembled entirely in host memory */
   hypre_CSRMatrixMemoryLocation(C_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_HOST;

   *C_ptr = S2;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] += hypre_MPI_Wtime();
#endif

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(num_coarse_prefix_sum, HYPRE_MEMORY_HOST);

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening
 *
 * Walks all num_var fine-grid points; for every point currently marked as a
 * C-point (CF_marker[i] > 0) it consumes the next entry of new_CF_marker,
 * which is indexed by coarse-point ordinal (cnt), not by fine index.
 *   - a plain C-point (== 1) is overwritten with the second-pass decision
 *     new_CF_marker[cnt];
 *   - any other positive marker (e.g. the special value 2 set by
 *     hypre_BoomerAMGCreate2ndS for rows that became empty) is reset to 1
 *     while still advancing cnt to stay aligned with new_CF_marker.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i, cnt;
   cnt = 0;
   for (i=0; i < num_var; i++)
   {
      if (CF_marker[i] > 0 )
      {
         if (CF_marker[i] == 1)
            CF_marker[i] = new_CF_marker[cnt++];
         else
         { CF_marker[i] = 1; cnt++;}
      }
   }

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. coarsening,
 * but marks new F-points (previous C-points) as -2
 *
 * Same traversal as hypre_BoomerAMGCorrectCFMarker, but instead of copying
 * the second-pass marker verbatim it distinguishes only two outcomes:
 * a C-point demoted by the second pass (new_CF_marker[cnt] == -1) becomes -2,
 * every other C-point is normalized to 1.  cnt again tracks the coarse-point
 * ordinal so new_CF_marker is consumed one entry per original C-point.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker2(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i, cnt;
   cnt = 0;
   for (i=0; i < num_var; i++)
   {
      if (CF_marker[i] > 0 )
      {
         if (new_CF_marker[cnt] == -1)
            CF_marker[i] = -2;
         else
            CF_marker[i] = 1;
         cnt++;
      }
   }

   return 0;
}
depend_iterator_bug.c
// RUN: %clang_cc1 -verify -fopenmp -triple x86_64-unknown-linux-gnu \ // RUN: -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics int x[100]; int y[100]; // CHECK-LABEL: @many_iterators_single_clause( // CHECK: [[VLA:%.*]] = alloca [[STRUCT_KMP_DEPEND_INFO:%.*]], i64 10, align 16 // CHECK: = call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* {{.*}}, i32 {{.*}}, i8* {{.*}}, i32 10, i8* {{.*}}, i32 0, i8* null) void many_iterators_single_clause() { #pragma omp task depend(iterator(j=0:5), in: x[j], y[j]) { } } // CHECK-LABEL: @many_iterators_many_clauses( // CHECK: [[VLA:%.*]] = alloca [[STRUCT_KMP_DEPEND_INFO:%.*]], i64 10, align 16 // CHECK: = call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* {{.*}}, i32 {{.*}}, i8* {{.*}}, i32 10, i8* {{.*}}, i32 0, i8* null) void many_iterators_many_clauses() { #pragma omp task depend(iterator(j=0:5), in: x[j]) \ depend(iterator(j=0:5), in: y[j]) { } }
Tree.h
#pragma once

#include <parallel/algorithm>
#include "Node.h"
#include "wtime.h"

#if 0
#define SLOW
#endif

/* Octree-style neighbor-search structure built over a particle set.
 * Construction sorts particles along a space-filling-curve key, inserts them
 * into a Node heap, then (when Nngb > 0) iteratively adjusts each particle's
 * smoothing length toward the target neighbor count Nngb.
 * Depends on the project-local Node/Particle/boundary types from Node.h. */
struct Tree
{
  typedef boundary<float> Boundary;
  Particle::Vector tree;
  Boundary BBox;   /* bounding box */
  std::vector<Node *> leafArray;   /* leaves collected by find_group_Node */

  /* orders particles by their space-filling-curve key */
  struct cmp_particle_key
  {
    bool operator() (const Particle &a, const Particle &b) {return a.key.val < b.key.val;}
  };

  /* Build the tree from ptcl_in; Nngb < 0 skips the neighbor-count
   * iteration entirely. NOTE: resets the global Node state (Node::clear). */
  Tree(const Particle::Vector &ptcl_in, const int Nngb = -1)
  {
    const double t0 = wtime();
    Node::clear();
    std::vector<Particle> &ptcl = Node::ptcl;
    ptcl = ptcl_in;
    const int nbody = ptcl_in.size();
    /* import particles and compute the Bounding Box */
    for (int i = 0; i < nbody; i++)
      BBox.merge(Boundary(ptcl[i].pos));
    std::cerr << BBox.min << std::endl;
    std::cerr << BBox.max << std::endl;
    const vec3 vsize = BBox.hlen();
    /* NOTE(review): vsize.x appears twice and vsize.z is never used --
     * this looks like it was meant to be
     *   std::max(vsize.x, std::max(vsize.y, vsize.z))
     * confirm against the original code base before changing. */
    const float rsize = std::max(vsize.x, std::max(vsize.x, vsize.y)) * 2.0f;
    /* round rsize up/down to the nearest power of two */
    float rsize2 = 1.0;
    while (rsize2 > rsize) rsize2 *= 0.5;
    while (rsize2 < rsize) rsize2 *= 2.0;
    /* now build the tree */
    for (int i = 0; i < nbody; i++)
      ptcl[i].compute_key(BBox.min, rsize2);
    __gnu_parallel::sort(ptcl.begin(), ptcl.end(), cmp_particle_key());
    Node::Node_heap.push_back(Node());
    Node &root = Node::Node_heap[0];
    root.size = rsize2;
    for (int i = 0; i < nbody; i++)
      root.push_particle(i, 60);   /* 60 = key depth limit; TODO confirm */
    const float volume = rsize*rsize*rsize;
    root.set_init_h(float(Nngb), volume);
    root.find_group_Node(Node::NLEAF, leafArray);
    const double t1 = wtime();
    fprintf(stderr, " -- Tree build is done in %g sec [ %g ptcl/sec ]\n",
        t1 - t0, nbody/(t1 - t0));

    /* iterate smoothing lengths toward the requested neighbor count */
    const int niter = 10;
    if (Nngb > 0)
      for (int iter = 0; iter< niter; iter++)
      {
        root.make_boundary();
#pragma omp parallel for
        for (int i = 0; i < nbody; i++)
          ptcl[i].nnb = 0;
#if 0  /* SLOW: one traversal per particle */
#pragma omp parallel for
        for(int i=0; i<nbody; i++)
          ptcl[i] << root;
#else  /* FAST: traverse once per particle group */
        std::vector<Node *> group_list;
        root.find_group_Node(2000, group_list);
#pragma omp parallel for schedule(dynamic)
        for(int i=0; i<(int)group_list.size(); i++)
          *group_list[i] << root;
#endif
        using long_t = unsigned long long;
        long_t nbMean = 0;
        long_t nbMax  = 0;
        long_t nbMin  = 1<<30;
#pragma omp parallel for reduction(+:nbMean) reduction(max:nbMax) reduction(min:nbMin)
        for (int i = 0; i < nbody; i++)
        {
          /* rescale h by cbrt(Nngb/nnb), clamped to [0.8, 2.0] per step */
          const float f = 0.5f * (1.0f + cbrtf(Nngb / (float)ptcl[i].nnb));
          const float fScale = std::max(std::min(f,2.0f), 0.8f);
          ptcl[i].set_h(ptcl[i].get_h() * fScale);
          nbMean += ptcl[i].nnb;
          nbMax = std::max(nbMax, (long_t)ptcl[i].nnb);
          nbMin = std::min(nbMin, (long_t)ptcl[i].nnb);
        }
        fprintf(stderr, "iteration= %d : nbMin= %g  nbMean= %g  nbMax= %g\n",
            iter, (float)nbMin, (float)nbMean/nbody, (float)nbMax);
      }
    const double t2 = wtime();
    fprintf(stderr, " -- Ngb find is done in %g sec [ %g ptcl/sec ]\n",
        t2 - t1, nbody/(t2 - t1));
  };
};
DetailedPlaceDB.h
/**
 * @file   DetailedPlaceDB.h
 * @author Yibo Lin
 * @date   Jan 2019
 */

#ifndef _DREAMPLACE_UTILITY_DETAILEDPLACEDB_H
#define _DREAMPLACE_UTILITY_DETAILEDPLACEDB_H

#include "math.h"
#include "utility/src/Msg.h"
#include "utility/src/Box.h"
#include "legality_check/src/legality_check.h"
//#include "draw_place/src/draw_place.h"
#include "utility/src/common.h"

DREAMPLACE_BEGIN_NAMESPACE

/// @brief a horizontal interval [xl, xh] of space in a row
template <typename T>
struct Space
{
    T xl;
    T xh;
};

/// @brief locates a cell inside bin2node_map: bin index + index within the bin
struct BinMapIndex
{
    int bin_id;
    int sub_id;
};

/// @brief locates a cell inside a row map: row index + index within the row
struct RowMapIndex
{
    int row_id;
    int sub_id;
};

/// @brief a wrapper class of required data for detailed placement
/// All pointers reference externally owned flat arrays; this struct does not
/// allocate or free anything. x/y are the only writable arrays.
template <typename T>
struct DetailedPlaceDB
{
    typedef T type;

    const T* init_x;                      ///< initial cell x positions
    const T* init_y;                      ///< initial cell y positions
    const T* node_size_x;                 ///< cell widths
    const T* node_size_y;                 ///< cell heights
    const T* flat_region_boxes;           ///< number of boxes x 4
    const int* flat_region_boxes_start;   ///< number of regions + 1
    const int* node2fence_region_map;     ///< length of number of movable cells
    T* x;                                 ///< current cell x positions (mutable)
    T* y;                                 ///< current cell y positions (mutable)
    const int* flat_net2pin_map;
    const int* flat_net2pin_start_map;
    const int* pin2net_map;
    const int* flat_node2pin_map;
    const int* flat_node2pin_start_map;
    const int* pin2node_map;
    const T* pin_offset_x;
    const T* pin_offset_y;
    const unsigned char* net_mask;        ///< nonzero = net participates in optimal-region computation
    T xl;                                 ///< layout left edge
    T yl;                                 ///< layout bottom edge
    T xh;                                 ///< layout right edge
    T yh;                                 ///< layout top edge
    T site_width;
    T row_height;
    T bin_size_x;
    T bin_size_y;
    int num_bins_x;
    int num_bins_y;
    int num_sites_x;
    int num_sites_y;
    int num_nodes;
    int num_movable_nodes;
    int num_nets;
    int num_pins;
    int num_regions; ///< number of regions for flat_region_boxes and flat_region_boxes_start

    /// @brief map x coordinate to site index, clamped to [0, num_sites_x-1]
    inline int pos2site_x(T xx) const
    {
        int sx = (xx-xl)/site_width;
        sx = std::max(sx, 0);
        sx = std::min(sx, num_sites_x-1);
        return sx;
    }
    /// @brief map y coordinate to row index, clamped to [0, num_sites_y-1]
    inline int pos2site_y(T yy) const
    {
        int sy = (yy-yl)/row_height;
        sy = std::max(sy, 0);
        sy = std::min(sy, num_sites_y-1);
        return sy;
    }
    /// @brief site index as an upper bound
    inline int pos2site_ub_x(T xx) const
    {
        int sx = ceil((xx-xl)/site_width);
        sx = std::max(sx, 1);
        sx = std::min(sx, num_sites_x);
        return sx;
    }
    /// @brief site index as an upper bound
    inline int pos2site_ub_y(T yy) const
    {
        int sy = ceil((yy-yl)/row_height);
        sy = std::max(sy, 1);
        sy = std::min(sy, num_sites_y);
        return sy;
    }
    /// @brief map x coordinate to bin column, clamped to [0, num_bins_x-1]
    inline int pos2bin_x(T xx) const
    {
        int bx = (xx-xl)/bin_size_x;
        bx = std::max(bx, 0);
        bx = std::min(bx, num_bins_x-1);
        return bx;
    }
    /// @brief map y coordinate to bin row, clamped to [0, num_bins_y-1]
    inline int pos2bin_y(T yy) const
    {
        int by = (yy-yl)/bin_size_y;
        by = std::max(by, 0);
        by = std::min(by, num_bins_y-1);
        return by;
    }
    /// @brief clamp all four box edges into the layout region
    inline void shift_box_to_layout(UBox<T>& box) const
    {
        box.xl = std::max(box.xl, xl);
        box.xl = std::min(box.xl, xh);
        box.xh = std::max(box.xh, xl);
        box.xh = std::min(box.xh, xh);
        box.yl = std::max(box.yl, yl);
        box.yl = std::min(box.yl, yh);
        box.yh = std::max(box.yh, yl);
        box.yh = std::min(box.yh, yh);
    }
    /// @brief convert a coordinate box to a site-index box
    inline UBox<int> box2sitebox(const UBox<T>& box) const
    {
        // xh, yh are exclusive
        UBox<int> sitebox (
                pos2site_x(box.xl),
                pos2site_y(box.yl),
                pos2site_ub_x(box.xh),
                pos2site_ub_y(box.yh)
                );
        return sitebox;
    }
    /// @brief convert a coordinate box to a bin-index box (both ends inclusive)
    inline UBox<int> box2binbox(const UBox<T>& box) const
    {
        UBox<int> binbox (
                pos2bin_x(box.xl),
                pos2bin_y(box.yl),
                pos2bin_x(box.xh),
                pos2bin_y(box.yh)
                );
        return binbox;
    }
    /// @brief align x coordinate to site
    inline T align2site(T xx) const
    {
        return floor((xx-xl)/site_width)*site_width+xl;
    }
    /// @brief align x coordinate to site for a space;
    /// make sure the space is shrinked.
    inline Space<T> align2site(Space<T> space) const
    {
        space.xl = ceil((space.xl - xl) / site_width) * site_width + xl;
        space.xh = floor((space.xh - xl) / site_width) * site_width + xl;
        return space;
    }
    /// @brief compute optimal region for a cell
    /// The method to compute optimal region ignores the pin offsets of the target cell.
    /// If we want to consider the pin offsets, there may not be feasible box for the optimal region.
    /// Thus, this is just an approximate optimal region.
    /// When using the optimal region, one needs to refer to the center of the cell to the region, or the region completely covers the entire cell.
    UBox<T> compute_optimal_region(int node_id) const
    {
        UBox<T> box (
                std::numeric_limits<T>::max(),
                std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max()
                );
        // bounding box of all pins of all masked nets incident to node_id,
        // excluding the pins on node_id itself
        for (int node2pin_id = flat_node2pin_start_map[node_id]; node2pin_id < flat_node2pin_start_map[node_id+1]; ++node2pin_id)
        {
            int node_pin_id = flat_node2pin_map[node2pin_id];
            int net_id = pin2net_map[node_pin_id];
            if (net_mask[net_id])
            {
                for (int net2pin_id = flat_net2pin_start_map[net_id]; net2pin_id < flat_net2pin_start_map[net_id+1]; ++net2pin_id)
                {
                    int net_pin_id = flat_net2pin_map[net2pin_id];
                    int other_node_id = pin2node_map[net_pin_id];
                    if (node_id != other_node_id)
                    {
                        box.xl = std::min(box.xl, x[other_node_id]+pin_offset_x[net_pin_id]);
                        box.xh = std::max(box.xh, x[other_node_id]+pin_offset_x[net_pin_id]);
                        box.yl = std::min(box.yl, y[other_node_id]+pin_offset_y[net_pin_id]);
                        box.yh = std::max(box.yh, y[other_node_id]+pin_offset_y[net_pin_id]);
                    }
                }
            }
        }
        shift_box_to_layout(box);
        return box;
    }
    // defined elsewhere (EDI integer-arithmetic variants)
    int64_t compute_net_hpwl_edi(int net_id) const;
    int64_t compute_total_hpwl_edi() const;
    /// @brief compute HPWL for a net
    /// @return 0 when the net has no pins (box never updated)
    T compute_net_hpwl(int net_id) const
    {
        UBox<T> box (
                std::numeric_limits<T>::max(),
                std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max(),
                -std::numeric_limits<T>::max()
                );
        for (int net2pin_id = flat_net2pin_start_map[net_id]; net2pin_id < flat_net2pin_start_map[net_id+1]; ++net2pin_id)
        {
            int net_pin_id = flat_net2pin_map[net2pin_id];
            int other_node_id = pin2node_map[net_pin_id];
            box.xl = std::min(box.xl, x[other_node_id]+pin_offset_x[net_pin_id]);
            box.xh = std::max(box.xh, x[other_node_id]+pin_offset_x[net_pin_id]);
            box.yl = std::min(box.yl, y[other_node_id]+pin_offset_y[net_pin_id]);
            box.yh = std::max(box.yh, y[other_node_id]+pin_offset_y[net_pin_id]);
        }
        if (box.xl == std::numeric_limits<T>::max() || box.yl == std::numeric_limits<T>::max())
        {
            return (T)0;
        }
        return (box.xh-box.xl) + (box.yh-box.yl);
    }
    /// @brief compute HPWL for all nets
    /// NOTE(review): the sum is divided by site_width, i.e. the result is in
    /// units of sites, not raw coordinates -- confirm callers expect this.
    T compute_total_hpwl() const
    {
        //dreamplacePrint(kDEBUG, "start compute_total_hpwl\n");
        T total_hpwl = 0;
        for (int net_id = 0; net_id < num_nets; ++net_id)
        {
            //if (net_mask[net_id])
            {
                total_hpwl += compute_net_hpwl(net_id);
            }
        }
        //dreamplacePrint(kDEBUG, "end compute_total_hpwl\n");
        return total_hpwl/site_width;
    }
    /// @brief distribute cells to rows
    /// A cell overlapping several rows is recorded in each of them.
    void make_row2node_map(const T* vx, const T* vy, std::vector<std::vector<int> >& row2node_map, int num_threads) const
    {
        // distribute cells to rows
        for (int i = 0; i < num_nodes; ++i)
        {
            //T node_xl = vx[i];
            T node_yl = vy[i];
            //T node_xh = node_xl+node_size_x[i];
            T node_yh = node_yl+node_size_y[i];

            int row_idxl = (node_yl-yl)/row_height;
            int row_idxh = ceil(static_cast<double>(node_yh-yl)/row_height)+1;
            row_idxl = std::max(row_idxl, 0);
            row_idxh = std::min(row_idxh, num_sites_y);

            for (int row_id = row_idxl; row_id < row_idxh; ++row_id)
            {
                T row_yl = yl+row_id*row_height;
                T row_yh = row_yl+row_height;

                if (node_yl < row_yh && node_yh > row_yl) // overlap with row
                {
                    row2node_map[row_id].push_back(i);
                }
            }
        }

        // sort cells within rows
        // it is safer to sort by center
        // sometimes there might be cells with 0 sizes
#ifdef _OPENMP
#pragma omp parallel for num_threads (num_threads) schedule(dynamic, 1)
#endif
        for (int i = 0; i < num_sites_y; ++i)
        {
            auto& row2nodes = row2node_map[i];
            // sort cells within rows according to left edges
            std::sort(row2nodes.begin(), row2nodes.end(),
                    [&] (int node_id1, int node_id2) {
                        T x1 = vx[node_id1];
                        T x2 = vx[node_id2];
                        return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
                    });
            // After sorting by left edge,
            // there is a special case for fixed cells where
            // one fixed cell is completely within another in a row.
            // This will cause failure to detect some overlaps.
            // We need to remove the "small" fixed cell that is inside another.
            if (!row2nodes.empty())
            {
                removeContainedFixedCellsFromRow(row2nodes, num_movable_nodes, vx, node_size_x);
                // sort according to center
                std::sort(row2nodes.begin(), row2nodes.end(),
                        [&] (int node_id1, int node_id2) {
                            T x1 = vx[node_id1] + node_size_x[node_id1]/2;
                            T x2 = vx[node_id2] + node_size_x[node_id2]/2;
                            return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
                        });
            }
        }
    }
    /// @brief distribute movable cells to bins
    /// Each movable cell goes into exactly one bin, keyed by its center.
    void make_bin2node_map(const T* host_x, const T* host_y,
            const T* host_node_size_x, const T* host_node_size_y,
            std::vector<std::vector<int> >& bin2node_map, std::vector<BinMapIndex>& node2bin_map) const
    {
        // construct bin2node_map
        for (int i = 0; i < num_movable_nodes; ++i)
        {
            int node_id = i;
            T node_x = host_x[node_id] + host_node_size_x[node_id]/2;
            T node_y = host_y[node_id] + host_node_size_y[node_id]/2;

            int bx = std::min(std::max((int)((node_x-xl)/bin_size_x), 0), num_bins_x-1);
            int by = std::min(std::max((int)((node_y-yl)/bin_size_y), 0), num_bins_y-1);
            int bin_id = bx*num_bins_y+by;
            //int sub_id = bin2node_map.at(bin_id).size();
            bin2node_map.at(bin_id).push_back(node_id);
        }
        // construct node2bin_map (reverse index into bin2node_map)
        for (unsigned int bin_id = 0; bin_id < bin2node_map.size(); ++bin_id)
        {
            for (unsigned int sub_id = 0; sub_id < bin2node_map[bin_id].size(); ++sub_id)
            {
                int node_id = bin2node_map[bin_id][sub_id];
                BinMapIndex& bm_idx = node2bin_map.at(node_id);
                bm_idx.bin_id = bin_id;
                bm_idx.sub_id = sub_id;
            }
        }
#ifdef DEBUG
        int max_num_nodes_per_bin = 0;
        for (unsigned int i = 0; i < bin2node_map.size(); ++i)
        {
            max_num_nodes_per_bin = std::max(max_num_nodes_per_bin, (int)bin2node_map[i].size());
        }
        printf("[D] max_num_nodes_per_bin = %d\n", max_num_nodes_per_bin);
#endif
    }
    /// @brief check whether placement is legal
    bool check_legality() const
    {
        return legalityCheckKernelCPU(
                x, y,
                node_size_x, node_size_y,
                flat_region_boxes, flat_region_boxes_start, node2fence_region_map,
                xl, yl, xh, yh,
                site_width, row_height,
                num_nodes,
                num_movable_nodes,
                num_regions
                );
    }
    /// @brief check whether a cell is within its fence region
    /// A cell is legal when its whole area is covered by the region's boxes.
    bool inside_fence(int node_id, T xx, T yy) const
    {
        T node_xl = xx;
        T node_yl = yy;
        T node_xh = node_xl + node_size_x[node_id];
        T node_yh = node_yl + node_size_y[node_id];

        bool legal_flag = true;
        int region_id = node2fence_region_map[node_id];
        if (region_id < num_regions)
        {
            int box_bgn = flat_region_boxes_start[region_id];
            int box_end = flat_region_boxes_start[region_id + 1];
            T node_area = (node_xh - node_xl) * (node_yh - node_yl);
            // I assume there is no overlap between boxes of a region
            // otherwise, preprocessing is required
            for (int box_id = box_bgn; box_id < box_end; ++box_id)
            {
                int box_offset = box_id*4;
                T box_xl = flat_region_boxes[box_offset];
                T box_yl = flat_region_boxes[box_offset + 1];
                T box_xh = flat_region_boxes[box_offset + 2];
                T box_yh = flat_region_boxes[box_offset + 3];

                T dx = std::max(std::min(node_xh, box_xh) - std::max(node_xl, box_xl), (T)0);
                T dy = std::max(std::min(node_yh, box_yh) - std::max(node_yl, box_yl), (T)0);
                T overlap = dx*dy;
                if (overlap > 0)
                {
                    node_area -= overlap;
                }
            }
            if (node_area > 0) // not consumed by boxes within a region
            {
                legal_flag = false;
            }
        }
        return legal_flag;
    }
    /// @brief draw placement, shield not used code
    /*void draw_place(const char* filename) const
    {
        drawPlaceLauncher<T>(
                x, y,
                node_size_x, node_size_y,
                pin_offset_x, pin_offset_y,
                pin2node_map,
                num_nodes,
                num_movable_nodes,
                0,
                flat_net2pin_start_map[num_nets],
                xl, yl, xh, yh,
                site_width, row_height,
                bin_size_x, bin_size_y,
                filename
                );
    }*/
};

DREAMPLACE_END_NAMESPACE

#endif
GB_binop__ne_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated specialization of the NE operator for int64 inputs
// (bool output). Any functional change belongs in the code-generator
// template, not in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB_AaddB__ne_int64
// A.*B function (eWiseMult):      GB_AemultB__ne_int64
// A*D function (colscale):        GB_AxD__ne_int64
// D*A function (rowscale):        GB_DxB__ne_int64
// C+=B function (dense accum):    GB_Cdense_accumB__ne_int64
// C+=b function (dense accum):    GB_Cdense_accumb__ne_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3):  GB_Cdense_ewise3_noaccum__ne_int64
// C=scalar+B                      GB_bind1st__ne_int64
// C=scalar+B'                     GB_bind1st_tran__ne_int64
// C=A+scalar                      GB_bind2nd__ne_int64
// C=A'+scalar                     GB_bind2nd_tran__ne_int64

// C type:   bool
// A type:   int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_INT64 || GxB_NO_NE_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (NE is none of these, so this kernel is not generated for this operator.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// NOTE(review): body is compiled out (#if 0) for this operator; the function
// exists only so the generated dispatch table links.
GrB_Info GB_Cdense_accumB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// NOTE(review): body is compiled out (#if 0) for this operator as well.
GrB_Info GB_Cdense_accumb__ne_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ne_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ne_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    int64_t aij = Ax [pA] ;  \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB_bind1st_tran__ne_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    int64_t aij = Ax [pA] ;  \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB_bind2nd_tran__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageBoundingBox() returns the bounding box of an image canvas.
%
%  The format of the GetImageBoundingBox method is:
%
%      RectangleInfo GetImageBoundingBox(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o bounds: Method GetImageBoundingBox returns the bounding box of an
%      image canvas.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const Quantum
    *r;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Start from an "inverted" box: x/y at the far edges, width/height at 0.
    During the scan bounds.width and bounds.height temporarily hold the
    largest non-background column/row index; they are converted to true
    extents after the loop.
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  GetPixelInfo(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    Sample three corner pixels as background references: target[0] is the
    top-left pixel, target[1] the top-right, target[2] the bottom-left.
    If even the first corner cannot be read, return the inverted box as-is.
  */
  r=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (r == (const Quantum *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  GetPixelInfoPixel(image,r,&target[0]);
  GetPixelInfo(image,&target[1]);
  r=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (r != (const Quantum *) NULL)
    GetPixelInfoPixel(image,r,&target[1]);
  GetPixelInfo(image,&target[2]);
  r=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (r != (const Quantum *) NULL)
    GetPixelInfoPixel(image,r,&target[2]);
  status=MagickTrue;
  GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    RectangleInfo
      bounding_box;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* take a consistent snapshot of the shared bounds for this row */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,p,&pixel);
      /* grow the row-local box whenever the pixel differs (fuzzily) from
         the corner reference relevant to that edge */
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p+=GetPixelChannels(image);
    }
    /* merge this row's box into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) && (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* convert max column/row index into an extent relative to x/y */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* one depth accumulator per worker thread; the maximum is taken at the end */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /*
        Palette image without alpha: depth is determined from the colormap
        alone, no pixel scan needed.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        if ((image->colors) > 256) \
        num_threads(GetMagickResourceLimit(ThreadResource))
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        /* raise this thread's depth until the colormap entry survives a
           round-trip through that depth */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((atDepth != MagickFalse) &&
              (GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map): precompute the
        minimal depth for every possible quantum value, then take the max
        over all pixel samples.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;   /* intentionally shadows the outer depth */

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        /* status==MagickFalse means some thread already hit the maximum
           depth; remaining rows can be skipped */
        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          continue;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelReadMask(image,p) == 0)
            {
              p+=GetPixelChannels(image);
              continue;
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel=GetPixelChannelChannel(image,i);

            PixelTrait
              traits=GetPixelChannelTraits(image,channel);

            if ((traits == UndefinedPixelTrait) ||
                (channel == IndexPixelChannel) ||
                (channel == ReadMaskPixelChannel) ||
                (channel == MetaPixelChannel))
              continue;
            if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
              current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
          }
          p+=GetPixelChannels(image);
        }
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Compute pixel depth (general path: probe each sample directly).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (channel == IndexPixelChannel) ||
            (channel == ReadMaskPixelChannel))
          continue;
        /* raise this thread's depth until the sample round-trips */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;

          range=GetQuantumRange(current_depth[id]);
          if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
            break;
          current_depth[id]++;
        }
      }
      p+=GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport ImageType GetImageType(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IsImageMonochrome(image) != MagickFalse) return(BilevelType); if (IsImageGray(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IsPaletteImage(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageGray() returns grayscale if all the pixels in the image have % the same red, green, and blue intensities, and bi-level is the intensity is % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register const Quantum *p; register ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(image,p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(image,p) == MagickFalse)) type=GrayscaleType; p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait)) type=GrayscaleAlphaType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register ssize_t x; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(image,p) == MagickFalse) { type=UndefinedType; break; } p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IdentifyImageMonochrome(image,exception) != MagickFalse) return(BilevelType); if (IdentifyImageGray(image,exception) != UndefinedType) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O p a q u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the image have % an alpha value other than OpaqueAlpha (QuantumRange). % % Will return true immediatally is alpha channel is not available. % % The format of the IsImageOpaque method is: % % MagickBooleanType IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const Quantum *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,p) != OpaqueAlpha) break; p+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageDepth() sets the depth of the image. % % The format of the SetImageDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. % % o exception: return any errors or warnings in this structure. 
%
% */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Requesting the native quantum depth (or deeper) is a no-op on pixels. */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Quantize the colormap entries to the requested depth.
        NOTE(review): `status` is named in the shared() clause below before it
        has been assigned; the loop body never touches it, so this appears
        harmless — confirm against upstream.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          if (GetPixelReadMask(image,q) == 0)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (channel == IndexPixelChannel) ||
                (channel == ReadMaskPixelChannel))
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (channel == IndexPixelChannel) ||
            (channel == ReadMaskPixelChannel))
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* An explicit "dither" artifact overrides the image's dither setting. */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Normalize then quantize to a 2-color gray palette if needed. */
      if (SetImageMonochrome(image,exception) == MagickFalse)
        {
          status=TransformImageColorspace(image,GRAYColorspace,exception);
          (void) NormalizeImage(image,exception);
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=2;
          quantize_info->colorspace=GRAYColorspace;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->colors=2;
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      if (SetImageGray(image,exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      if (SetImageGray(image,exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* Quantize only when a palette representation is not already possible. */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* Threshold only the alpha channel, then restore the channel mask. */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      /* Route through sRGB first when coming from a non-RGB colorspace. */
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace,exception);
          status=TransformImageColorspace(image,CMYKColorspace,exception);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace,exception);
          status=TransformImageColorspace(image,CMYKColorspace,exception);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /* Only record the new type once every transform above has succeeded. */
  image->type=type;
  return(MagickTrue);
}
rnn_impl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file rnn_impl.h * \brief * \author Shu Zhang */ #ifndef MXNET_OPERATOR_RNN_IMPL_H_ #define MXNET_OPERATOR_RNN_IMPL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <algorithm> #include <random> #include <map> #include <vector> #include <string> #include <utility> #include "./math.h" #include "./math_functions-inl.h" #include "./operator_common.h" #include "./mshadow_op.h" #include "./linalg.h" namespace mxnet { namespace op { template <typename DType> inline DType sigmoid(DType x) { return 1.0f / (1.0f + exp(-x)); } template <typename DType> inline DType relu(DType x) { return x > 0.0f ? 
static_cast<float>(x) : 0.0f; } template <typename DType> void LstmForwardTrainingSingleLayer(DType* ws, DType* rs, bool state_outputs, bool bid, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, const Tensor<cpu, 2, DType>& cx, const Tensor<cpu, 3, DType>& y, DType* w_ptr, DType* b_ptr, DType* hy_ptr, DType* cy_ptr) { using namespace mshadow; const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I)); const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H)); const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H)); const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H)); const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H)); const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H)); const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H)); const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H)); Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H)); DType* c_ptr = bid ? rs + T * N * H * 7 : rs; Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H)); Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4)); const int offset = bid ? H : 0; const DType alpha = 1.0; const DType beta = 0.0; const index_t cell_size = N * H; linalg_gemm(x, wx, yx_flat, alpha, beta, false, true); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (index_t i = 0; i < T; ++i) { index_t t = bid ? T - 1 - i : i; linalg_gemm(i ? 
h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
    for (index_t jk = 0; jk < cell_size; ++jk) {
      index_t j = jk / H;
      index_t k = jk % H;
      // Standard LSTM cell: input/forget/candidate/output gates.
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i - 1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve: keep states and gate activations for the backward pass.
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}

// Multi-layer (optionally bidirectional) LSTM forward pass in training mode.
// Applies inverted dropout between layers (recording the mask in the reserve
// space) and finally copies the top layer's output into y_ptr.
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const index_t T,
                         const index_t N,
                         const index_t I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr,
                         const float dropout,
                         std::mt19937& rnd_engine) {  // NOLINT(runtime/references)
  DType* dropout_random = rs;
  DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const index_t b_size = 2 * H * 4;
  const index_t r_size = D * T * N * H * 6;
  const index_t y_offset = T * N * H * 5;
  const index_t cell_size = N * H;
  int idx = 0;  // state & cell state's idx;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < L; ++i) {
    // First layer consumes the raw input (width I); later layers consume the
    // previous layer's output (width H * D).
    const index_t input_size = i ? H * D : I;
    const index_t w_size = (input_size + H) * H * 4;
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
    LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H,
                                          x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // Reverse direction of the same layer.
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H,
                                            x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      if (dropout > 0.0f) {
        // Inverted dropout: keep-mask is recorded so backward can reuse it.
        std::uniform_real_distribution<float> distribution(0, 1);
        for (index_t j = 0; j < T * N * H * D; j++) {
          if (distribution(rnd_engine) < dropout) {
            dropout_random[i * T * N * H * D + j] = 0;
            y.dptr_[j] = 0;
          } else {
            dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
            y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
          }
        }
      }
      x_ptr = y.dptr_;
      rs2 += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  // Copy the top layer's output (held in reserve space) to the user buffer.
#pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = (rs2 + y_offset)[i];
  }
}

// One forward LSTM layer in inference mode; supports an optional projection
// (P > 0) of the hidden state.  Only workspace `ws` is used — no reserve.
template <typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const index_t T,
                                     const index_t N,
                                     const index_t I,
                                     const int H,
                                     const int P,
                                     const Tensor<cpu, 2, DType>& x,
                                     const Tensor<cpu, 2, DType>& hx,
                                     const Tensor<cpu, 2, DType>& cx,
                                     const Tensor<cpu, 3, DType>& y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, (P ?
P : H)));
  Tensor<cpu, 2, DType> whr(w_ptr, Shape2(1, 1));
  if (P > 0)
    // Projection weights follow the recurrent weights in the weight blob.
    whr = Tensor<cpu, 2, DType>(wh.dptr_ + P * 4 * H, Shape2(P, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> r(hy_ptr, Shape2(1, 1));
  if (P > 0)
    r = Tensor<cpu, 2, DType>(hy_ptr, Shape2(N, P));
  const int offset = bid ? H : 0;
  const int proj_offset = bid ? P : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const index_t cell_size = N * H;
  // Input projection for all timesteps at once: yx = x * wx^T.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (index_t i = 0; i < T; ++i) {
    index_t t = bid ? T - 1 - i : i;
    // With projection, the recurrent input is the projected state r.
    if (P > 0) {
      linalg_gemm(i ? r : hx, wh, yh_flat, alpha, beta, false, true);
    } else {
      linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    }
#pragma omp parallel for num_threads(omp_threads)
    for (index_t jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      if (P == 0)
        y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        if (P == 0)
          hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        c[j][k] = ct;
      }
      h[j][k] = ht;
    }
    if (P > 0) {
      // Project the hidden state (r = h * whr^T) and copy it into y.
      linalg_gemm(h, whr, r, alpha, beta, false, true);
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
#pragma omp parallel for num_threads(omp_threads)
      for (int j = 0; j < N; ++j) {
        std::memcpy(y[t][j].dptr_ + proj_offset, r[j].dptr_, P * sizeof(DType));
      }
#pragma GCC diagnostic pop
    }
  }
}

// Multi-layer (optionally bidirectional) LSTM forward pass in inference
// mode.  For D == 2 the per-layer output ping-pongs between a temporary
// buffer in the workspace and the final output buffer.
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const index_t T,
                          const index_t N,
                          const index_t I,
                          const int H,
                          const int P,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, P ? P : H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const index_t b_size = 2 * H * 4;
  const index_t cell_size = N * H;
  const index_t projection_size = (P ? P : H) * N;
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // state & cell state's idx;
  // Choose the starting buffer so the last layer lands in y_ptr.
  bool flag = L % 2 ? false : true;
  for (int i = 0; i < L; ++i) {
    const index_t input_size = i ? (P ? P : H) * D : I;
    index_t w_size = (input_size + (P ? P : H)) * H * 4;
    if (P > 0) {
      w_size += P * H;
    }
    // If bidirectional, need space to save current layer output y.
    if (D == 2) {
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, (P ? P : H) * D));
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H, P,
                                           x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    // If bidirectional, then calculate the reverse direction's forward result.
    if (D == 2) {
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += projection_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H, P,
                                             x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    // Don't need to move pointer in the last layer.
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;
      ++idx;
      if (state_outputs) {
        hy_ptr += projection_size;
        cy_ptr += cell_size;
      }
    }
  }
}

// Backward pass for one LSTM layer.  Consumes the states/gates recorded in
// the reserve space by the training forward pass.  (Definition continues
// beyond this chunk.)
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
                             DType* rs,
                             DType* tmp_buf,
                             bool bid,
                             const index_t T,
                             const index_t N,
                             const index_t I,
                             const int H,
                             const Tensor<cpu, 2, DType>& x,
                             const Tensor<cpu, 2, DType>& hx,
                             const Tensor<cpu, 2, DType>& cx,
                             const Tensor<cpu, 3, DType>& y,
                             const Tensor<cpu, 3, DType>& dy,
                             const Tensor<cpu, 2, DType>& dx,
                             const Tensor<cpu, 2, DType>& dhx,
                             const Tensor<cpu, 2, DType>& dcx,
                             DType* dhy_ptr,
                             DType* dcy_ptr,
                             DType* w_ptr,
                             DType* dw_ptr,
                             DType* db_ptr,
                             int req_data,
                             int req_params,
                             int req_state,
                             int req_statecell) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
  Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
  Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
  DType* c_ptr = bid ?
rs + T * N * H * 7 : rs;  // NOTE(review): reverse direction's saved c/ifgo start
                          // 7*T*N*H into the reserved space — must match the
                          // forward pass layout; confirm against LstmForwardTraining.
  const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));             // saved cell states
  const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));  // saved gate activations
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  if (req_params != kNullOp && req_params != kAddTo) {
    // kWriteTo/kWriteInplace: gradients are accumulated below, so clear them first.
#pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < H * 4 * H; ++i) {
      dwh.dptr_[i] = 0;
    }
#pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < 4 * H; ++i) {
      dbx.dptr_[i] = 0;
      dbh.dptr_[i] = 0;
    }
  }
  // Workspace: per-step gate gradients, running dh/dc, and a scratch h buffer.
  Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
  Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;  // reverse direction reads/writes the second H slot
  const DType alpha = 1.0;
  const DType beta0 = 0.0;
  const DType beta1 = 1.0;
  const DType beta2 = 2.0;  // used for the kAddTo accumulation path
  const index_t cell_size = N * H;
  // Seed the running gradients with dhy/dcy when provided, else zero.
  if (dhy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = dhy_ptr[i];
    }
  } else {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = 0;
    }
  }
  if (dcy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = dcy_ptr[i];
    }
  } else {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = 0;
    }
  }
  // Walk time in reverse order of the forward pass (i indexes saved state,
  // t indexes the output sequence; they differ for the reverse direction).
  for (index_t i = T - 1; i >= 0; --i) {
    index_t t = bid ? T - 1 - i : i;
    index_t tnext = bid ? t + 1 : t - 1;
    // At i == 0 the "next" (earlier-in-time) state is the initial state.
    const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
    const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
    const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
    const Tensor<cpu, 2, DType>& cnext = i ?
c[i - 1] : cx;
#pragma omp parallel for num_threads(omp_threads)
    for (index_t jk = 0; jk < cell_size; ++jk) {
      index_t j = jk / H;
      index_t k = jk % H;
      DType tc = tanh(c[i][j][k]);
      DType it = ifgo[i][j][k][0];
      DType ft = ifgo[i][j][k][1];
      DType gt = ifgo[i][j][k][2];
      DType ot = ifgo[i][j][k][3];
      // Accumulate the output gradient, then backprop through the cell:
      // derivatives of sigmoid (s*(1-s)) and tanh (1-t^2) appear below.
      dh[j][k] += dy[t][j][k + offset];
      dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
      difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
      difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
      difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
      difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
      if (req_statecell != kNullOp || i > 0) {
        dcnext[j][k] = dc[j][k] * ft;
      }
      if (i) {
        htmp[j][k] = y[tnext][j][k + offset];  // stage h_{t-1} for the dwh GEMM
      }
    }
    Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
    if (req_state != kNullOp || i > 0) {
      linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
    }
    if (req_params != kNullOp) {
      if (req_params != kAddTo) {
        linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
      } else {
        linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
        // generate dwx every time step for AddTo
        Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
        linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
      }
    }
  }
  // Input gradient for the whole sequence in one GEMM; for the reverse
  // direction accumulate (beta1) on top of the forward direction's dx.
  Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
  if (req_data != kNullOp) {
    linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
  }
  if (req_params != kNullOp && req_params != kAddTo) {
    linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
  }
  const index_t row = T * N;
  const index_t col = H * 4;
  if (req_params != kNullOp) {
    if (req_params != kAddTo) {
      // Bias gradient: sum gate gradients over all rows; dbx == dbh for LSTM.
      for (index_t i = 0; i < row; ++i) {
#pragma omp parallel for num_threads(omp_threads)
        for (index_t j = 0; j < col; ++j) {
          dbx[j] += dyx[i][j];
          dbh[j] = dbx[j];
        }
      }
    } else {
      // kAddTo path: accumulate per-time-step partial sums in tmp_buf first.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < col * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
        for (index_t j = 0; j < col; ++j) {
          for (index_t i = 0; i < N; ++i) {
            tmp_dbx[j][t] += dyx[t * N + i][j];
            tmp_dbh[j][t] = tmp_dbx[j][t];
          }
        }
#pragma omp parallel for num_threads(omp_threads)
        for (index_t j = 0; j < col; ++j) {
          dbx[j] += tmp_dbx[j][t] + dbx[j];
          dbh[j] += tmp_dbh[j][t] + dbh[j];
        }
      }
    }
  }
}

// Backward pass for a (possibly bidirectional) stacked LSTM.
// Iterates layers top-down; each layer's input gradient becomes the output
// gradient of the layer below. rs is the reserved space written by
// LstmForwardTraining, ws is scratch.
// req_* are gradient request flags (kNullOp skips, kAddTo accumulates).
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const index_t T,
                  const index_t N,
                  const index_t I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr,
                  int req_data,
                  int req_params,
                  int req_state,
                  int req_statecell,
                  const float dropout) {
  // Reserved-space layout must mirror LstmForwardTraining: the dropout masks
  // for the L-1 inter-layer boundaries come first, the per-layer records after.
  DType* dropout_random = rs + (L - 1) * D * T * N * H;
  DType* rs2 = rs + (L - 1) * D * T * N * H;
  DType* tmp_buf = ws;
  DType* ws2 = tmp_buf + 8 * T * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const index_t b_size = 2 * H * 4;
  const index_t r_size = D * T * N * H * 6;   // reserved-space stride per layer
  const index_t y_offset = T * N * H * 5;     // offset of saved y within a layer's record
  const index_t w_size1 = (I + H) * H * 4;      // first layer
  const index_t w_size2 = (D * H + H) * H * 4;  // other layers
  const index_t cell_size = N * H;
  const index_t y_size = T * N * H * D;
  DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
  for (int i = L - 1; i >= 0; --i) {
    const index_t input_size = i ? H * D : I;
    const index_t w_size = i ? w_size2 : w_size1;
    int idx = i * D;  // index into the stacked hx/cx/dhx/dcx state tensors
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs2 + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // Layer input: for i > 0 it is the previous layer's saved output.
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                   req_data, req_params, req_state, req_statecell);
    if (D == 2) {
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr;
      dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : nullptr;
      LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
                                     x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                     dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                     req_data, req_params, req_state, req_statecell);
      // Prevent overwriting dy while calculating dx in left2right layer:
      // alternate between two dx scratch buffers across layer iterations.
      const int loop_iteration = (L - 1) - i;
      dy_tmp_ptr = loop_iteration % 2 ? dy_tmp_ptr - y_size : dy_tmp_ptr + y_size;
    }
    if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
      // Replay the forward pass's dropout mask on the input gradient.
      dropout_random = dropout_random - T * N * D * H;
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
      for (index_t j = 0; j < T * N * D * H; j++) {
        if (dropout_random[j] == 0) {
          dx.dptr_[j] = 0;
        } else {
          dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
        }
      }
    }
    dy_ptr = dx.dptr_;  // this layer's dx is the next (lower) layer's dy
  }
}

// Inference for one GRU layer, both directions when D == 2.
//   ws      : scratch for gate pre-activations (gemmC1/gemmC2) and the r/z/n gates.
//   tmp_buf : scratch used to transpose h_{t-1} when D == 2.
//   y_ptr   : (T, N, D*H) output; time step 0 of each direction is seeded from hx.
//   hy_ptr  : final state per direction when state_outputs.
template <typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
                                    DType* tmp_buf,
                                    bool state_outputs,
                                    const int D,
                                    const index_t T,
                                    const index_t N,
                                    const index_t I,
                                    const int H,
                                    const Tensor<cpu, 2, DType>& x,
                                    const Tensor<cpu, 2, DType>& hx,
                                    DType* wx_ptr,
                                    DType* wh_ptr,
                                    DType* bx_ptr,
                                    DType* bh_ptr,
                                    DType* y_ptr,
                                    DType* hy_ptr) {
  // Forward direction walks y left-to-right; the reverse direction walks it
  // right-to-left starting at the last step, offset H into the D*H slot.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;                           // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;   // N * 3 * H
  DType* rt = gemmC2 + N * 3 * H;
  DType* zt = rt + N * H;
  DType* nt = zt + N * H;
  // Reverse-direction weights are packed directly after the forward ones.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + 3 * H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr) ?
bh_ptr + 3 * H * 2 : nullptr;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step "t-1 of step 0" with the initial hidden state(s).
  if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));

  // x * wx.T : [T * N, I] * [I, 3 * H] — precomputed for all steps at once.
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }

  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Bidirectional: h_{t-1} is interleaved (N, D*H); transpose through
      // tmp_buf so direction 0's slice can be fed to the GEMM.
      Tensor<cpu, 3, DType> dht_1_tmp =
          Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        // Row offsets of the r/z/n gate blocks in the (N, 3H) GEMM outputs.
        index_t rtb = i * 3 * H;
        index_t ztb = i * 3 * H + H;
        index_t ntb = i * 3 * H + 2 * H;
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + bx[1][j] + bh[1][j]);
        nt[i * H + j] =
            tanh(gemmC1_t[ntb + j] + bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] =
            (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp =
          Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t rtb = i * 3 * H;
          index_t ztb = i * 3 * H + H;
          index_t ntb = i * 3 * H + 2 * H;
          rt[i * H + j] =
              sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] =
              sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j] +
                               rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] =
              (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Inference driver for a stacked (and optionally bidirectional) GRU.
// Alternates layer output between y_ptr and the y_tmp scratch so the final
// layer always lands in y_ptr ((L + l) % 2 selection below).
// Note: I is passed by value and rebound to D*H after the first layer.
template <typename DType>
void GruForwardInference(DType* ws,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const index_t T,
                         const index_t N,
                         index_t I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* w_ptr,
                         DType* y_ptr,
                         DType* hy_ptr) {
  // Packed weight layout: all wx/wh matrices first, then all biases.
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) +
              (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;

  DType* y_tmp = ws;
  DType* y_l = x_ptr;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;

  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardInferenceSingleLayer<DType>(
        ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      I = D * H;  // subsequent layers consume the previous layer's D*H output
    }
    wh_l = wx_l + I * 3 * H;
  }
}

// Training forward for one GRU layer. Same computation as the inference
// variant, but additionally records the per-step gates (gateR/gateZ/gateN)
// and the pre-activation candidate term Mnh for the backward pass.
template <typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   bool state_outputs,
                                   const int D,
                                   const index_t T,
                                   const index_t N,
                                   const index_t I,
                                   const int H,
                                   const Tensor<cpu, 2, DType>& x,
                                   const Tensor<cpu, 2, DType>& hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* bx_ptr,
                                   DType* bh_ptr,
                                   DType* gateR,
                                   DType* gateZ,
                                   DType* gateN,
                                   DType* Mnh,
                                   DType* y_ptr,
                                   DType* hy_ptr) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
  DType* back_ht = back_ht_1;

  DType* gemmC1 = ws;  // [D, T, N, 3 * H]
  DType* gemmC2 =
gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  // Gate buffers point straight into the reserved space so the backward pass
  // can read them back.
  DType* rt = gateR;
  DType* zt = gateZ;
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + 3 * H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr) ? bh_ptr + 3 * H * 2 : nullptr;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_Mnh = Mnh + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;

  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed y with the initial hidden state(s), as in the inference variant.
  if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }

  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));

  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }

  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Transpose the interleaved (N, D*H) state via tmp_buf to isolate
      // direction 0 for the GEMM.
      Tensor<cpu, 3, DType> dht_1_tmp =
          Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * 3 * H;
    DType* Mnht = Mnh + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t rtb = i * 3 * H;
        index_t ztb = i * 3 * H + H;
        index_t ntb = i * 3 * H + 2 * H;
        // Save h_{t-1}*Wh_n + bh_n; the backward pass needs it for dr.
        Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + bx[1][j] + bh[1][j]);
        nt[i * H + j] =
            tanh(gemmC1_t[ntb + j] + bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] =
            (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      rt = back_gateR + (T - 1 - t) * N * H;
      zt = back_gateZ + (T - 1 - t) * N * H;
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp =
          Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);

      DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t rtb = i * 3 * H;
          index_t ztb = i * 3 * H + H;
          index_t ntb = i * 3 * H + 2 * H;
          back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
          rt[i * H + j] =
              sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] =
              sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j] +
                               rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] =
              (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }

  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Training forward for a stacked GRU. The reserved space rs records, per
// layer: r/z/n gates, layer outputs y, the Mnh candidate terms, and the
// inter-layer dropout masks — all consumed by GruBackward.
// I is rebound to D*H after the first layer.
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const index_t T,
                        const index_t N,
                        index_t I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr,
                        const float dropout,
                        std::mt19937& rnd_engine) {  // NOLINT(runtime/references)
  // Packed weight layout: all wx/wh first, then all biases.
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) +
              (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserved-space partition (must match GruBackward's expectations).
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType*
hx_ = dht1 + D * N * H; // [N, D, H] DType* Mnht = Mnh; DType* back_ht1; DType* back_dht1 = dht1 + N * H; // [N, H] DType* back_Mnht = Mnh + T * N * H; DType* back_gateR = gateR + T * N * H; DType* back_gateZ = gateZ + T * N * H; DType* back_gateN = gateN + T * N * H; DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H; DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H; DType* back_dwx = dwx + I * 3 * H + H * 3 * H; DType* back_dwh = dwh + I * 3 * H + H * 3 * H; DType* back_dbx = dbx + 3 * H * 2; DType* back_dbh = dbh + 3 * H * 2; DType alpha = 1.0; DType beta = 0.0; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (req_params != kNullOp && req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * H * 3 * H; ++i) { dwh[i] = 0; } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * 3 * H; ++i) { dbx[i] = 0; dbh[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { dht1[i] = dhy_ptr[i]; } else { dht1[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + j] = hx[i][j]; } } if (D == 2) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { back_dht1[i] = dhy_ptr[N * H + i]; } else { back_dht1[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + H + j] = hx[N + i][j]; } } } for (index_t t = T - 1; t >= 0; --t) { if (t) { ht1 = y_ptr + (t - 1) * N * D * H; } else { ht1 = hx_; } // add dy[T, N, D, H] to dhy[D, N, H] dyt = dy_ptr + t * N * D 
* H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { dht1[i * H + j] += dyt[i * D * H + j]; } } rt = gateR + t * N * H; zt = gateZ + t * N * H; nt = gateN + t * N * H; Mnht = Mnh + t * N * H; dat = da + t * N * 3 * H; dart = dar + t * N * 3 * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { int nid = i * 3 * H + 2 * H + j; int zid = i * 3 * H + H + j; int rid = i * 3 * H + j; int id = i * H + j; dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]); dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) * zt[id] * (1 - zt[id]); dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] * (1 - rt[id]); dart[nid] = dat[nid] * rt[id]; dht1[id] = dht1[id] * zt[id]; } } if (req_params != kNullOp) { alpha = 1.0; beta = 1.0; // dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H] Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H)); Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H)); linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false); if (req_params == kAddTo) { beta = 2.0; // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I)); linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false); } // dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H] Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H)); Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H)); Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N)); linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { for 
(index_t j = 0; j < N * T; ++j) {
          dbx[i] += da[j * 3 * H + i];
          dbh[i] += dar[j * 3 * H + i];
        }
      }
    } else {
      // kAddTo: stage per-timestep column sums in tmp_buf, then fold them in.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < H * T * 3; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (index_t j = 0; j < N; ++j) {
            tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
            tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
          }
        }
#pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          // NOTE(review): dbx[i] is added to itself as well as the staged sum
          // (the running value is doubled each step) -- verify against upstream.
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] += tmp_dbh[i][t] + dbh[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
  Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
    linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
  }
  // Backward pass over the reverse lane of a bidirectional GRU (D == 2).
  if (D == 2) {
    for (index_t t = 0; t < T; ++t) {
      if (t == T - 1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      rt = back_gateR + t * N * H;
      zt = back_gateZ + t * N * H;
      nt = back_gateN + t * N * H;
      back_Mnht = Mnh + (T + t) * N * H;
      dat = da + t * N * 3 * H;
      dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          // Gate gradients; nid/zid/rid index the n/z/r gate slots of da/dar.
          index_t nid = i * 3 * H + 2 * H + j;
          index_t zid = i * 3 * H + H + j;
          index_t rid = i * 3 * H + j;
          index_t id = i * H + j;
          dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
          dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] - nt[id]) * zt[id] * (1 - zt[id]);
          dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] * (1 - rt[id]);
          dart[nid] = dat[nid] * rt[id];
          back_dht1[id] = back_dht1[id] * zt[id];
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;
          // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
          linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    if (req_params != kNullOp) {
      // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
      if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (index_t j = 0; j < N * T; ++j) {
            back_dbx[i] += da[j * 3 * H + i];
            back_dbh[i] += dar[j * 3 * H + i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
        for (index_t i = 0; i < H * T * 3; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            for (index_t j = 0; j < N; ++j) {
              tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
              tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
            }
          }
#pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;
    // dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
    Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
      linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}

// Multi-layer GRU backward driver: walks layers from top to bottom, carving
// per-layer weight/gradient/workspace pointers out of the flat buffers and
// delegating the real work to GruBackwardSingleLayer.
template <typename DType>
void GruBackward(DType* ws,
                 DType* rs,
                 const int L,
                 const int D,
                 const index_t T,
                 const index_t N,
                 index_t I,
                 const int H,
                 DType* x_ptr,
                 DType* hx_ptr,
                 DType* w_ptr,
                 DType* dy_ptr,
                 DType* dhy_ptr,
                 DType* dx_ptr,
                 DType* dhx_ptr,
                 DType* dw_ptr,
                 int req_data,
                 int req_params,
                 int req_state,
                 const float dropout) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H * 3;
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Saved forward-pass state (reserved space), laid out per layer.
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType*
dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Top layer's weight/gradient pointers; lower layers are reached by stepping
  // these pointers backwards at the bottom of the loop.
  DType* wx_l = (L == 1) ? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;
  } else {
    wh_l = wh_l + (D * H) * H * 3;
  }
  DType* dhy_l = nullptr;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1) ? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = nullptr;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  index_t inputsize = I;
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    // Layer 0 consumes the real input x; upper layers consume the layer
    // below's saved output.
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l, dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l, dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      // Inverted-dropout backward: zero where the forward mask was zero,
      // otherwise rescale by 1/(1 - dropout).
      dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // This layer's dx becomes dy for the layer below.
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_tmp - T * N * H * D;
      if (l == 1) {
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}

// Single-layer vanilla-RNN forward (inference):
//   ht = act(x * wx.T + bx + ht-1 * wh.T + bh), act = tanh when mode == 1,
// otherwise relu; runs an additional reverse lane when D == 2.
template <typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
                                           DType* tmp_buf,
                                           bool state_outputs,
                                           const int D,
                                           const index_t T,
                                           const index_t N,
                                           const index_t I,
                                           const int H,
                                           const Tensor<cpu, 2, DType>& x,
                                           const Tensor<cpu, 2, DType>& hx,
                                           DType* wx_ptr,
                                           DType* wh_ptr,
                                           DType* bx_ptr,
                                           DType* bh_ptr,
                                           DType* y_ptr,
                                           DType* hy_ptr,
                                           int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr) ?
      bh_ptr + H * 2 : nullptr;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the first step (and the reverse lane's last step) with the initial
  // hidden state hx.
  if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for
(int j = 0; j < H; ++j) {
        index_t tb = i * H;
        if (mode == 1) {
          ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]);
        } else {
          ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t tb = i * H;
          if (mode == 1) {
            back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Multi-layer vanilla-RNN inference driver: ping-pongs layer outputs between
// the ws scratch buffer (y_tmp) and y_ptr so the last layer lands in y_ptr.
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
                                bool state_outputs,
                                const int L,
                                const int D,
                                const index_t T,
                                const index_t N,
                                index_t I,
                                const int H,
                                DType* x_ptr,
                                DType* hx_ptr,
                                DType* w_ptr,
                                DType* y_ptr,
                                DType* hy_ptr,
                                int mode) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  DType* y_tmp = ws;
  DType* y_l = x_ptr;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    // (L + l) % 2 alternates the output buffer; for l == L - 1 it is always
    // odd, so the final layer writes directly to y_ptr.
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l, mode);
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;
    }
    wh_l = wx_l + I * H;
  }
}

// Single-layer vanilla-RNN forward (training): same recurrence as the
// inference kernel, but additionally records per-step gate values in gateN
// for use by the backward pass.
template <typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
                                          DType* tmp_buf,
                                          bool state_outputs,
                                          const int D,
                                          const index_t T,
                                          const index_t N,
                                          const index_t I,
                                          const int H,
                                          const Tensor<cpu, 2, DType>& x,
                                          const Tensor<cpu, 2, DType>& hx,
                                          DType* wx_ptr,
                                          DType* wh_ptr,
                                          DType* bx_ptr,
                                          DType* bh_ptr,
                                          DType* gateN,
                                          DType* y_ptr,
                                          DType* hy_ptr,
                                          int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr) ?
bh_ptr + H * 2 : nullptr;
  DType* back_gateN = gateN + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the first step (and the reverse lane's last step) with hx.
  if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t tb = i * H;
        if (mode == 1) {
          // tanh mode stores the activated value in nt.
          nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]);
        } else {
          // relu mode stores the pre-activation value in nt.
          nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
          ht[i * D * H + j] = relu(nt[tb + j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t tb = i * H;
          if (mode == 1) {
            nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
            back_ht[i * D * H + j] = relu(nt[tb + j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Multi-layer vanilla-RNN training driver: applies inverted dropout between
// layers (recording the mask for backward) and copies the top layer's output
// into y_ptr at the end.
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
                               DType* rs,
                               bool state_outputs,
                               const int L,
                               const int D,
                               const index_t T,
                               const index_t N,
                               index_t I,
                               const int H,
                               DType* x_ptr,
                               DType* hx_ptr,
                               DType* w_ptr,
                               DType* y_ptr,
                               DType* hy_ptr,
                               const float dropout,
                               int mode,
                               std::mt19937& rnd_engine) {  // NOLINT(runtime/references)
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  DType* gateN_l = rs;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      // Inverted dropout on the previous layer's output; the mask (0 or
      // 1 - dropout) is saved in dropout_random for the backward pass.
      std::uniform_real_distribution<float> distribution(0, 1);
      for (index_t i = 0; i < T * N * I; i++) {
        if (distribution(rnd_engine) < dropout) {
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, gateN_l, y_l, hy_l, mode);
    gateN_l = gateN_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;
    }
    wh_l = wx_l + I * H;
  }
#pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}

// Single-layer vanilla-RNN backward: consumes the saved gate values (gateN)
// and produces dx, dhx and, when requested, weight/bias gradients.
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   const int D,
                                   const index_t T,
                                   const index_t N,
                                   const index_t I,
                                   const int H,
const Tensor<cpu, 2, DType>& x,
                                   const Tensor<cpu, 2, DType>& hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* y_ptr,
                                   DType* dy_ptr,
                                   DType* dhy_ptr,
                                   DType* gateN,
                                   DType* dx,
                                   DType* dhx,
                                   DType* dwx,
                                   DType* dwh,
                                   DType* dbx,
                                   DType* dbh,
                                   int req_data,
                                   int req_params,
                                   int req_state,
                                   int mode) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* dart;
  DType* nt;
  DType* dar = ws;  // [T, N, H]
  DType* dht1 = dar + T * N * H;  // [D, N, H]
  DType* hx_ = dht1 + D * N * H;  // [N, D, H]
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  DType* back_gateN = gateN + T * N * H;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_dwx = dwx + I * H + H * H;
  DType* back_dwh = dwh + I * H + H * H;
  DType* back_dbx = dbx + H * 2;
  DType* back_dbh = dbh + H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  if (req_params != kNullOp && req_params != kAddTo) {
    // Fresh write: clear dwh and the bias gradients before accumulating.
#pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * H; ++i) {
      dwh[i] = 0;
    }
#pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
#pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
#pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // Walk time steps backwards for the forward direction.
  // NOTE(review): relies on index_t being signed (t >= 0 with --t).
  for (index_t t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    nt = gateN + t * N * H;
    dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t id = i * H + j;
        if (mode == 1) {
          // tanh': 1 - nt^2 (nt holds the activated value in tanh mode).
          dart[id] = dht1[id] * (1 - nt[id] * nt[id]);
        } else {
          // relu': pass the gradient only where the pre-activation was > 0.
          dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f;
        }
        dht1[id] = 0;
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh [N, H] = [N, H] * [H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        beta = 2.0;
        // dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
        linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1 [H, H] = [H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  if (req_params != kNullOp) {
    // dbx = e * da [1, H] = [1, N] * [N, H]
    // bx and bh enter the forward activation additively in the same place,
    // so dbh is kept equal to dbx.
    if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H; ++i) {
        for (index_t j = 0; j < N * T; ++j) {
          dbx[i] += dar[j * H + i];
          dbh[i] = dbx[i];
        }
      }
    } else {
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < H * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (index_t j = 0; j < N; ++j) {
            tmp_dbx[i][t] += dar[t * N * H + j * H + i];
            tmp_dbh[i][t] = tmp_dbx[i][t];
          }
        }
#pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          // NOTE(review): dbx[i] is added to itself as well as the staged sum,
          // mirroring the GRU variant -- verify against upstream.
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] = dbx[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx [T * N, I] = [T * N, H] * [H, I]
  Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
    linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
  }
  if (D == 2) {
    // Reverse lane: time runs forward here because that lane was computed
    // back-to-front in the forward pass.
    for (index_t t = 0; t < T; ++t) {
      if (t == T - 1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      nt = back_gateN + t * N * H;
      dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t id = i * H + j;
          if (mode == 1) {
            dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
          } else {
            dart[id] = nt[id] > 0.0f ?
static_cast<float>(back_dht1[id]) : 0.0f;
          }
          back_dht1[id] = 0;
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh [N, H] = [N, H] * [H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1 [H, H] = [H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;
          // dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
          linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    if (req_params != kNullOp) {
      // dbx = e * da [1, H] = [1, N] * [N, H]
      if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (index_t j = 0; j < N * T; ++j) {
            back_dbx[i] += dar[j * H + i];
            back_dbh[i] = back_dbx[i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
        for (index_t i = 0; i < H * T; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            for (index_t j = 0; j < N; ++j) {
              tmp_dbx[i][t] += dar[t * N * H + j * H + i];
              tmp_dbh[i][t] = tmp_dbx[i][t];
            }
          }
#pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] = back_dbx[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;
    // dxt = da * wx [T * N, I] = [T * N, H] * [H, I]
    Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
      linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}

// Multi-layer vanilla-RNN backward driver, mirroring GruBackward: walks layers
// top-down, re-deriving per-layer weight/gradient pointers out of the flat
// buffers and delegating to VanillaRNNBackwardSingleLayer.
template <typename DType>
void VanillaRNNBackward(DType* ws,
                        DType* rs,
                        const int L,
                        const int D,
                        const index_t T,
                        const index_t N,
                        index_t I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* dy_ptr,
                        DType* dhy_ptr,
                        DType* dx_ptr,
                        DType* dhx_ptr,
                        DType* dw_ptr,
                        int req_data,
                        int req_params,
                        int req_state,
                        const float dropout,
                        int mode) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H;
  DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D;
  DType* gateN_l = rs + (L - 1) * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  DType* wx_l = (L == 1) ? wx : wx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H;
  } else {
    wh_l = wh_l + (D * H) * H;
  }
  DType* dhy_l = nullptr;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1) ? dwx : dwx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H;
  DType* dwh_l = nullptr;
  if (L == 1) {
    dwh_l = dwx_l + I * H;
  } else {
    dwh_l = dwx_l + (D * H) * H;
  }
  DType* dbx_l = dbx + (L - 1) * D * H * 2;
  DType* dbh_l = dbx_l + H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  index_t inputsize = I;
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    // Layer 0 consumes the real input x; upper layers consume the layer
    // below's saved output.
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state, mode);
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      // Inverted-dropout backward using the mask saved by the forward pass.
      dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // This layer's dx becomes dy for the layer below.
#pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      gateN_l = gateN_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      if (l == 1) {
        wx_l = wx_l - (inputsize + H) * H * D;
        wh_l = wx_l + inputsize * H;
        dwx_l = dwx_l - (inputsize + H) * H * D;
        dwh_l = dwx_l + inputsize * H;
      } else {
        wx_l = wx_l - (I + H) * H * D;
        wh_l = wx_l + I * H;
        dwx_l = dwx_l - (I + H) * H * D;
        dwh_l = dwx_l + I * H;
      }
      dbx_l = dbx_l - D * H * 2;
      dbh_l = dbx_l + H;
    }
  }
}
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_RNN_IMPL_H_
/* ===== concatenated-file boundary: GB_binop__isle_uint16.c ===== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_01__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_03__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isle_uint16)
// A*D function (colscale):         GB (_AxD__isle_uint16)
// D*A function (rowscale):         GB (_DxB__isle_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__isle_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__isle_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isle_uint16)
// C=scalar+B                       GB (_bind1st__isle_uint16)
// C=scalar+B'                      GB (_bind1st_tran__isle_uint16)
// C=A+scalar                       GB (_bind2nd__isle_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__isle_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_UINT16 || GxB_NO_ISLE_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returned
    // (artifact of the code generator).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isle_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
reductionmissing-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* A kernel for a two-level parallelizable loop nest with a reduction.
   The "-yes" suffix in this benchmark's filename means the defect is
   INTENTIONAL: reduction(+:error) is deliberately omitted from the
   pragma, so the unsynchronized updates to the shared variable `error`
   are a data race that race-detection tools are expected to report.
   Do NOT "fix" this file; it is a DataRaceBench ground-truth positive. */

#include <stdlib.h>

int main(int argc, char* argv[])
{
  int i,j;
  float temp, error;                /* `error` is shared across threads and is also read
                                       uninitialized on the first accumulation (intentional;
                                       the benchmark only exercises race detection) */
  int len=100;                      /* default problem size */
  if (argc>1) len = atoi(argv[1]); /* optional: override size from argv[1] */

  float u[len][len];                /* VLA; contents never initialized (intentional) */

  /* NOTE: reduction(+:error) is intentionally missing here. */
#pragma omp parallel for private (temp,i,j)
  for (i = 0; i < len; i++)
    for (j = 0; j < len; j++)
      {
        temp = u[i][j];
        error = error + temp * temp;  /* racy read-modify-write on shared `error` */
      }
}
valid.yolo6.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_128_68_68_256_1_1.h"
#include "gen_ukr_A4B2gemm_1_128_68_68_256_1_1.h"

/*
 * Auto-generated ("push button") tiled convolution-as-GEMM driver for one
 * YOLO layer: 68x68 spatial positions (4624 = 68*68), 128 output filters,
 * 256 input channels, 1x1 kernel (per the generated-ukr header names).
 *
 *   A    - input activations, read through offsetA
 *   B    - scratch buffer receiving the repacked (transposed) weights
 *   C    - output, written by the scatter micro-kernels
 *   oriB - original, untransposed weights
 *
 * Must be called from inside an OpenMP parallel region: it uses
 * omp_get_thread_num() and a bare "#pragma omp barrier" between the
 * weight-repacking stage and the compute stage.
 *
 * NOTE(review): uNf/uNc/uNw/uNh, Txy3/Tf2/Tc1 and min() are presumably
 * macros/constants supplied by "ukr.h" -- confirm there.  Nx/Ny/Nh are
 * declared but unused (generator artifact).
 */
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    int Nx = 68;   /* unused */
    int Ny = 68;   /* unused */
    int Nh = 1;    /* unused */
    long long Astrides[6] = {0,1,2,3,4,5};  /* per-row offsets handed to the scatter micro-kernels */
    int b1 = 0;                             /* batch index (single batch) */

    /* Stage 1: repack weights.  Each thread transposes its share of oriB
       into B as 16-filter-wide panels, one 8x8 tile at a time (two 8-row
       halves per 16-wide panel). */
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }

    /* All threads must finish repacking before any thread consumes B. */
    #pragma omp barrier// begin push button generated block

    /* Stage 2: compute.  Multi-level cache/register tiling over the
       xy (spatial, 4624), f (filter, 128) and c (channel, 256) dimensions.
       The outer xy5/f5/c5 and xy4/f4/c4 levels are degenerate single-trip
       loops emitted by the generator; Txy3/Tf2/Tc1 are the effective tile
       sizes.  The innermost unit is a 6-row x 2-vector (or a 4x2 tail)
       micro-kernel applied over one channel tile. */
    for(int xy5=0;xy5<4624+0;xy5+=4624) {
    for(int f5=0;f5<128+0;f5+=128) {
    for(int c5=0;c5<256+0;c5+=256) {
    for(int xy4=xy5;xy4<min(4624, 4624+xy5);xy4+=4624) {
    for(int f4=f5;f4<min(128, 128+f5);f4+=128) {
    for(int c4=c5;c4<min(256, 256+c5);c4+=256) {
    for(int xy3=xy4;xy3<min(4624, 4624+xy4);xy3+=Txy3) {
    for(int f3=f4;f3<min(128, 128+f4);f3+=Tf2) {
    for(int c3=c4;c3<min(256, 256+c4);c3+=Tc1) {
    for(int xy2=xy3;xy2<min(4624, Txy3+xy3);xy2+=6) {
    for(int f2=f3;f2<min(128, Tf2+f3);f2+=16) {
    for(int c2=c3;c2<min(256, Tc1+c3);c2+=Tc1) {
    for(int c1=c2;c1<min(256, Tc1+c2);c1+=Tc1) {
    for(int xy1=xy2;xy1<min(4624, 6+xy2);xy1+=6) {
    for(int f1=f2;f1<min(128, 16+f2);f1+=16) {
        int ctile=min(Tc1, 256-c1);  /* channels remaining in this tile */
        /* Decompose the linear xy/c/f indices into spatial, channel and
           filter components (the /1 and %1 terms are generator artifacts
           for unit-sized sub-dimensions). */
        int x1=xy1/68; int y1=xy1%68/1;
        int c1_1=c1/1; int c1_2=c1%1/1;
        int kf1_1=f1/16; int kf1_2=f1%16/1;
        int of1_1=f1/1; int of1_2=f1%1/1;
        /* Flat element offsets into A (input), packed B (weights), C (output). */
        int offsetA=0+b1*1183744+c1_1*4624+1*x1*68+1*y1*1+c1_2*1;
        int offsetB=0+kf1_1*4096+c1*16+0*16+0*16+kf1_2*1;
        int offsetC=0+b1*591872+of1_1*4624+x1*68+y1*1+of1_2*1;
        if(68-y1>=6){
            /* Full 6 rows available within the current image row. */
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
        else if(68*68-xy1>=6){
            /* 6 rows remain overall but wrap into the next image row:
               the generator patches Astrides before/after the call
               (the adjustment is +=0 here, i.e. a no-op for this
               layer's unit strides). */
            for(int sti=68-y1;sti<6;sti+=1) { Astrides[sti]+=0; }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
            for(int sti=68-y1;sti<6;sti+=1) { Astrides[sti]-=0; }
        }
        else{
            /* Fewer than 6 spatial positions left at the very end: 4x2v tail kernel. */
            cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
    } } } } } } } } } } } } } } }
    // end push button generated block
}
mixed_tentusscher_myo_epi_2004_S2_8.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_8.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G 
sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5775905751090,0.00128750116236230,0.779935933436537,0.779723352637460,0.000174601580517662,0.485187163266569,0.00293835320030002,0.999998351150449,1.93050784561332e-08,1.88871626954558e-05,0.999770189484549,1.00686798673141,0.999990981534461,5.04809778641404e-05,0.633311900953308,9.21356412743337,140.293156217684}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real 
Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // 
real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real 
sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.7252581949953,0.000291452087344709,0.000161170796436720,0.000633934538710454,0.279044052137589,0.174768168776027,0.153544471133922,3.79004728820943,0.0185410078648928,2.75737595448530,1090.40644729057,0.000413513792232501,0.271770874770752,0.0199966759287835,0.00318963944351548,4.77961669240113e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real 
IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); 
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = 
FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
polybench.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* polybench.c: this file is part of PolyBench/C */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <time.h> #include <sys/time.h> #ifdef __linux__ #include <sys/resource.h> #elif _WIN32 // Do not include <sys/resource.h> #else #include <sys/resource.h> #endif #include <sched.h> #include <math.h> #ifdef _OPENMP # include <omp.h> #endif #if defined(POLYBENCH_PAPI) # undef POLYBENCH_PAPI # include "polybench.h" # define POLYBENCH_PAPI #else # include "polybench.h" #endif /* By default, collect PAPI counters on thread 0. */ #ifndef POLYBENCH_THREAD_MONITOR # define POLYBENCH_THREAD_MONITOR 0 #endif /* Total LLC cache size. By default 32+MB.. */ #ifndef POLYBENCH_CACHE_SIZE_KB # define POLYBENCH_CACHE_SIZE_KB 32770 #endif int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR; double polybench_program_total_flops = 0; #ifdef POLYBENCH_PAPI # include <papi.h> # define POLYBENCH_MAX_NB_PAPI_COUNTERS 96 char* _polybench_papi_eventlist[] = { #include "papi_counters.list" NULL }; int polybench_papi_eventset; int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS]; long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS]; #endif /* * Allocation table, to enable inter-array padding. All data allocated * with polybench_alloc_data should be freed with polybench_free_data. * */ #define NB_INITIAL_TABLE_ENTRIES 512 struct polybench_data_ptrs { void** user_view; void** real_ptr; int nb_entries; int nb_avail_entries; }; static struct polybench_data_ptrs* _polybench_alloc_table = NULL; static size_t polybench_inter_array_padding_sz = 0; /* Timer code (gettimeofday). */ double polybench_t_start, polybench_t_end; /* Timer code (RDTSC). 
*/ unsigned long long int polybench_c_start, polybench_c_end; static double rtclock() { #if defined(POLYBENCH_TIME) || defined(POLYBENCH_GFLOPS) struct timeval Tp; int stat; stat = gettimeofday (&Tp, NULL); if (stat != 0) printf ("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); #else return 0; #endif } #ifdef POLYBENCH_CYCLE_ACCURATE_TIMER static unsigned long long int rdtsc() { unsigned long long int ret = 0; unsigned int cycles_lo; unsigned int cycles_hi; __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi)); ret = (unsigned long long int)cycles_hi << 32 | cycles_lo; return ret; } #endif void polybench_flush_cache() { int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double); double* flush = (double*) calloc (cs, sizeof(double)); int i; double tmp = 0.0; #ifdef _OPENMP //#pragma omp parallel for reduction(+:tmp) private(i) #endif for (i = 0; i < cs; i++) tmp += flush[i]; assert (tmp <= 10.0); free (flush); } #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER void polybench_linux_fifo_scheduler() { /* Use FIFO scheduler to limit OS interference. Program must be run as root, and this works only for Linux kernels. */ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO); sched_setscheduler (0, SCHED_FIFO, &schedParam); } void polybench_linux_standard_scheduler() { /* Restore to standard scheduler policy. 
*/ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER); sched_setscheduler (0, SCHED_OTHER, &schedParam); } #endif #ifdef POLYBENCH_PAPI static void test_fail(char *file, int line, char *call, int retval) { char buf[128]; memset(buf, '\0', sizeof(buf)); if (retval != 0) fprintf (stdout, "%-40s FAILED\nLine # %d\n", file, line); else { fprintf (stdout, "%-40s SKIPPED\n", file); fprintf (stdout, "Line # %d\n", line); } if (retval == PAPI_ESYS) { sprintf (buf, "System error in %s", call); perror (buf); } else if (retval > 0) fprintf (stdout, "Error: %s\n", call); else if (retval == 0) fprintf (stdout, "Error: %s\n", call); else { char errstring[PAPI_MAX_STR_LEN]; // PAPI 5.4.3 has changed the API for PAPI_perror. #if defined (PAPI_VERSION) && ((PAPI_VERSION_MAJOR(PAPI_VERSION) == 5 && PAPI_VERSION_MINOR(PAPI_VERSION) >= 4) || PAPI_VERSION_MAJOR(PAPI_VERSION) > 5) fprintf (stdout, "Error in %s: %s\n", call, PAPI_strerror(retval)); #else PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN); fprintf (stdout, "Error in %s: %s\n", call, errstring); #endif } fprintf (stdout, "\n"); if (PAPI_is_initialized ()) PAPI_shutdown (); exit (1); } void polybench_papi_init() { # ifdef _OPENMP //#pragma omp parallel { #pragma omp master { if (omp_get_max_threads () < polybench_papi_counters_threadid) polybench_papi_counters_threadid = omp_get_max_threads () - 1; } #pragma omp barrier if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; polybench_papi_eventset = PAPI_NULL; if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) test_fail (__FILE__, __LINE__, "PAPI_library_init", retval); if ((retval = PAPI_create_eventset (&polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval); int k; for (k = 0; _polybench_papi_eventlist[k]; ++k) { if ((retval = PAPI_event_name_to_code (_polybench_papi_eventlist[k], &(polybench_papi_eventlist[k]))) != PAPI_OK) 
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval); } polybench_papi_eventlist[k] = 0; # ifdef _OPENMP } } #pragma omp barrier # endif } void polybench_papi_close() { # ifdef _OPENMP //#pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval); if (PAPI_is_initialized ()) PAPI_shutdown (); # ifdef _OPENMP } } #pragma omp barrier # endif } int polybench_papi_start_counter(int evid) { # ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache(); # endif # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval = 1; char descr[PAPI_MAX_STR_LEN]; PAPI_event_info_t evinfo; PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr); if (PAPI_add_event (polybench_papi_eventset, polybench_papi_eventlist[evid]) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_add_event", 1); if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval); if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_start", retval); # ifdef _OPENMP } } #pragma omp barrier # endif return 0; } void polybench_papi_stop_counter(int evid) { # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; long_long values[1]; values[0] = 0; if ((retval = PAPI_read (polybench_papi_eventset, &values[0])) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_read", retval); if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_stop", retval); polybench_papi_values[evid] = values[0]; if ((retval = PAPI_remove_event (polybench_papi_eventset, polybench_papi_eventlist[evid])) != PAPI_OK) test_fail (__FILE__, __LINE__, 
"PAPI_remove_event", retval); # ifdef _OPENMP } } #pragma omp barrier # endif } void polybench_papi_print() { int verbose = 0; # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num() == polybench_papi_counters_threadid) { #ifdef POLYBENCH_PAPI_VERBOSE verbose = 1; #endif if (verbose) printf ("On thread %d:\n", polybench_papi_counters_threadid); #endif int evid; for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid) { if (verbose) printf ("%s=", _polybench_papi_eventlist[evid]); printf ("%llu ", polybench_papi_values[evid]); if (verbose) printf ("\n"); } printf ("\n"); # ifdef _OPENMP } } #pragma omp barrier # endif } #endif /* ! POLYBENCH_PAPI */ void polybench_prepare_instruments() { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_fifo_scheduler (); #endif } void polybench_timer_start() { polybench_prepare_instruments (); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_start = rtclock (); #else polybench_c_start = rdtsc (); #endif } void polybench_timer_stop() { #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_end = rtclock (); #else polybench_c_end = rdtsc (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_standard_scheduler (); #endif } void polybench_timer_print() { #ifdef POLYBENCH_GFLOPS if (polybench_program_total_flops == 0) { printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n"); printf ("%0.6lf\n", polybench_t_end - polybench_t_start); } else printf ("%0.2lf\n", (polybench_program_total_flops / (double)(polybench_t_end - polybench_t_start)) / 1000000000); #else # ifndef POLYBENCH_CYCLE_ACCURATE_TIMER printf ("%0.6f\n", polybench_t_end - polybench_t_start); # else printf ("%Ld\n", polybench_c_end - polybench_c_start); # endif #endif } /* * These functions are used only if the user defines a specific * inter-array padding. 
It grows a global structure,
 * _polybench_alloc_table, which keeps track of the data allocated via
 * polybench_alloc_data (on which inter-array padding is applied), so
 * that the original, non-shifted pointer can be recovered when
 * calling polybench_free_data.
 *
 */
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD

/* Extend the allocation table by NB_INITIAL_TABLE_ENTRIES slots.
   Must be called only when the table exists and is exactly full
   (nb_entries is a multiple of the chunk size, no slot available). */
static
void grow_alloc_table()
{
  if (_polybench_alloc_table == NULL ||
      (_polybench_alloc_table->nb_entries % NB_INITIAL_TABLE_ENTRIES) != 0 ||
      _polybench_alloc_table->nb_avail_entries != 0)
    {
      /* Should never happen if the API is properly used. */
      fprintf (stderr, "[ERROR] Inter-array padding requires to use polybench_alloc_data and polybench_free_data\n");
      exit (1);
    }
  size_t sz = _polybench_alloc_table->nb_entries;
  sz += NB_INITIAL_TABLE_ENTRIES;
  /* Check the realloc results explicitly: the previous assert()-based
     checks are compiled out under -DNDEBUG, which would leave NULL
     pointers in the table on allocation failure. */
  void** new_user_view =
    realloc (_polybench_alloc_table->user_view, sz * sizeof(void*));
  void** new_real_ptr =
    realloc (_polybench_alloc_table->real_ptr, sz * sizeof(void*));
  if (new_user_view == NULL || new_real_ptr == NULL)
    {
      fprintf (stderr, "[PolyBench] cannot grow the allocation table\n");
      exit (1);
    }
  _polybench_alloc_table->user_view = new_user_view;
  _polybench_alloc_table->real_ptr = new_real_ptr;
  _polybench_alloc_table->nb_avail_entries = NB_INITIAL_TABLE_ENTRIES;
}

/* Record one padded allocation in the table: 'ptr' is the pointer
   returned by the allocator, and the user-visible pointer is 'ptr'
   shifted forward by the padding amount (padded_sz - orig_sz).
   Returns the shifted, user-visible pointer. */
static
void* register_padded_pointer(void* ptr, size_t orig_sz, size_t padded_sz)
{
  if (_polybench_alloc_table == NULL)
    {
      fprintf (stderr, "[ERROR] Inter-array padding requires to use polybench_alloc_data and polybench_free_data\n");
      exit (1);
    }
  if (_polybench_alloc_table->nb_avail_entries == 0)
    grow_alloc_table ();
  int id = _polybench_alloc_table->nb_entries++;
  /* Fix: consume one free slot. The original never decremented
     nb_avail_entries here, so grow_alloc_table() was never triggered and
     the 512-entry arrays overflowed on the 513th allocation. */
  _polybench_alloc_table->nb_avail_entries--;
  _polybench_alloc_table->real_ptr[id] = ptr;
  /* Arithmetic on void* is a GNU extension; go through char* instead. */
  _polybench_alloc_table->user_view[id] = (char*) ptr + (padded_sz - orig_sz);
  return _polybench_alloc_table->user_view[id];
}

/* Free the original (non-shifted) pointer behind 'ptr', which may be
   either the user-visible or the real pointer, and compact the table.
   The table itself is released once its last entry is removed. */
static
void free_data_from_alloc_table (void* ptr)
{
  if (_polybench_alloc_table != NULL && _polybench_alloc_table->nb_entries > 0)
    {
      int i;
      for (i = 0; i < _polybench_alloc_table->nb_entries; ++i)
	if (_polybench_alloc_table->user_view[i] == ptr ||
	    _polybench_alloc_table->real_ptr[i] == ptr)
	  break;
      if (i != _polybench_alloc_table->nb_entries)
	{
	  free (_polybench_alloc_table->real_ptr[i]);
	  /* Shift the remaining entries down over the freed slot. */
	  for (; i < _polybench_alloc_table->nb_entries - 1; ++i)
	    {
	      _polybench_alloc_table->user_view[i] =
		_polybench_alloc_table->user_view[i + 1];
	      _polybench_alloc_table->real_ptr[i] =
		_polybench_alloc_table->real_ptr[i + 1];
	    }
	  _polybench_alloc_table->nb_entries--;
	  _polybench_alloc_table->nb_avail_entries++;
	  if (_polybench_alloc_table->nb_entries == 0)
	    {
	      free (_polybench_alloc_table->user_view);
	      free (_polybench_alloc_table->real_ptr);
	      free (_polybench_alloc_table);
	      _polybench_alloc_table = NULL;
	    }
	}
    }
}

/* Lazily create the allocation table on first use. */
static
void check_alloc_table_state()
{
  if (_polybench_alloc_table == NULL)
    {
      _polybench_alloc_table = (struct polybench_data_ptrs*)
	malloc (sizeof(struct polybench_data_ptrs));
      /* Explicit checks instead of assert(): see grow_alloc_table. */
      if (_polybench_alloc_table == NULL)
	{
	  fprintf (stderr, "[PolyBench] cannot allocate the allocation table\n");
	  exit (1);
	}
      _polybench_alloc_table->user_view =
	(void**) malloc (sizeof(void*) * NB_INITIAL_TABLE_ENTRIES);
      _polybench_alloc_table->real_ptr =
	(void**) malloc (sizeof(void*) * NB_INITIAL_TABLE_ENTRIES);
      if (_polybench_alloc_table->user_view == NULL ||
	  _polybench_alloc_table->real_ptr == NULL)
	{
	  fprintf (stderr, "[PolyBench] cannot allocate the allocation table\n");
	  exit (1);
	}
      _polybench_alloc_table->nb_entries = 0;
      _polybench_alloc_table->nb_avail_entries = NB_INITIAL_TABLE_ENTRIES;
    }
}

#endif // !POLYBENCH_ENABLE_INTARRAY_PAD

/* Allocate 'alloc_sz' bytes, 4096-byte aligned, plus a cumulative
   inter-array pad. Aborts the program on allocation failure. */
static
void* xmalloc(size_t alloc_sz)
{
  void* ret = NULL;
  /* By default, post-pad the arrays. Safe behavior, but likely useless. */
  polybench_inter_array_padding_sz += POLYBENCH_INTER_ARRAY_PADDING_FACTOR;
  size_t padded_sz = alloc_sz + polybench_inter_array_padding_sz;
  int err = posix_memalign (&ret, 4096, padded_sz);
  if (! ret || err)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory\n");
      exit (1);
    }
  /* Safeguard: this is invoked only if polybench.c has been compiled
     with inter-array padding support from polybench.h. If so, move the
     starting address of the allocation and return it to the user. The
     original pointer is registered in an allocation table internal to
     polybench.c. Data must then be freed using polybench_free_data,
     which will inspect the allocation table to free the original
     pointer.*/
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD
  /* This moves the 'ret' pointer by (padded_sz - alloc_sz) positions, and
     registers it in the lookup table for future free using
     polybench_free_data. */
  ret = register_padded_pointer(ret, alloc_sz, padded_sz);
#endif
  return ret;
}

/* Public API: release a pointer obtained from polybench_alloc_data. */
void polybench_free_data(void* ptr)
{
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD
  free_data_from_alloc_table (ptr);
#else
  free (ptr);
#endif
}

/* Public API: allocate an array of 'n' elements of 'elt_size' bytes.
   Aborts on size overflow or allocation failure. */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD
  check_alloc_table_state ();
#endif
  /* Detect overflow of n * elt_size before it wraps (resolves the old
     FIXME): reject non-positive element sizes and products exceeding
     SIZE_MAX. */
  if (elt_size <= 0 ||
      (unsigned long long int) n > ((size_t) -1) / (size_t) elt_size)
    {
      fprintf (stderr, "[PolyBench] polybench_alloc_data: size overflow\n");
      exit (1);
    }
  size_t val = n;
  val *= elt_size;
  void* ret = xmalloc (val);
  return ret;
}
GB_unop__isnan_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isnan_bool_fp64) // op(A') function: GB (_unop_tran__isnan_bool_fp64) // C type: bool // A type: double // cast: double cij = (aij) // unaryop: cij = isnan (aij) #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isnan (x) ; // casting #define GB_CAST(z, aij) \ double z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (aij) ; \ Cx [pC] = isnan (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isnan_bool_fp64) ( bool *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t 
p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = (aij) ; Cx [p] = isnan (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = (aij) ; Cx [p] = isnan (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isnan_bool_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
fio.h
#include <string.h> #include <stdlib.h> #include <byteswap.h> #include <omp.h> namespace fun3d { const unsigned int THRESHOLD = 100; template<typename T> T __bswap(T val) { /* unsigned int */ if(sizeof(T) == 4) return(__bswap_32((unsigned int)val)); else { T val_; const size_t sz = sizeof(T); const size_t sz_ = sz - 1; char * cval = (char *) &val; char * cval_ = (char *) &val_; for(unsigned int i = 0; i < sz; i++) cval_[i] = cval[sz_- i]; for(unsigned int i = 0; i < sz; i++) cval[i] = cval_[i]; return val; } } template<typename T> void walk(char *l, const char *h, T *b) { for(; l < h; l += sizeof(T)) { T k; memcpy(&k, l, sizeof(T)); *(b++) = __bswap<T>(k); } } template<typename T> void walk(char *l, const char *h, const size_t sz, T *b) { if(sz <= THRESHOLD) walk<T>(l, h, b); else { const size_t sz_ = sz / 2; char *m = l + (sz_ * sizeof(T)); char *k = (char *)b + (sz_ * sizeof(T)); #pragma omp task walk<T>(l, m, sz_, b); walk<T>(m, h, (sz-sz_), (T *)k); #pragma omp taskwait } } template<typename T> void walkfbuf(char *l, const char *h, const size_t sz, T *b) { #pragma omp parallel { #pragma omp single { walk<T>(l, h, sz, b); } } } };
omp2-1.c
#include<math.h> #include<stdio.h> int main() { int i; double v = 0.0; #pragma omp parallel for firstprivate(v) lastprivate(v) for (i = 0; i < 1000000000; i++) { v += sqrt(i); } printf("%lf\n", v); return 0; }
PoW.c
// Copyright (c) 2016-2018 HDCH Foundation Ltd.

#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#ifndef MAC_OSX
#include <omp.h>
#endif
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"

// #define SSE_VERSION

/*
 * Step 1: Initialize working memory.
 *
 * Fills the WORK_MEMORY_SIZE-byte buffer 'Maddr' in 32-byte rows.  Every
 * K-th row (i % K == 0) is produced by hashing the running state 'a' with
 * a one-way function selected from funcInfor[] and is also used to reseed
 * the four PRNG streams; the rows in between are filled from PRNG output,
 * rotated, and XOR-folded back into 'a'.
 *
 * input/inputLen: seed message hashed by funcInfor[0] to start the chain.
 * Maddr:          output working memory (WORK_MEMORY_SIZE bytes).
 * K:              reseed period, in 32-byte rows.
 *
 * NOTE(review): OUTPUT_LEN is presumably 32 given the hard-coded 32s and
 * the (i << 5) row stride -- confirm in common.h.  Also assumes
 * INPUT_LEN >= OUTPUT_LEN, since rrs() writes OUTPUT_LEN bytes into the
 * INPUT_LEN-sized a_rrs buffer -- TODO confirm.
 */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K)
{
    uint32_t i, j;
    uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN];
    // a = H0(input): initial chaining state
    funcInfor[0].func(input, inputLen, a);
    uint64_t randSeed[4] = {0, 0, 0, 0};
#ifndef SSE_VERSION
    struct my_rand48_data randBuffer[4];
#else
    struct vrand48_data randBuffer[2];
#endif
    // One iteration per 32-byte row of working memory.
    const uint32_t iterNum = WORK_MEMORY_SIZE >> 5;
    for (i = 0; i < iterNum; ++i) {
        if (i % K) {
            // PRNG-filled row.  The streams were seeded on the most
            // recent i % K == 0 iteration (i == 0 on the first pass, so
            // randBuffer is always seeded before use here).
#ifndef SSE_VERSION
            uint64_t num = 0;
            // Concatenate 8 bytes from each of the 4 PRNG streams into b.
            for (j = 0; j < 4; ++j) {
                my_rand64_r(&randBuffer[j], &num);
                memcpy(b + (j << 3), (uint8_t *)&num, 8*sizeof(uint8_t));
            }
#else
            vrand64(b, randBuffer);
#endif
            uint8_t shift_num;
            uint8_t result[OUTPUT_LEN];
            // Row-dependent rotate amount derived from the row index i.
            reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
            rrs(b, OUTPUT_LEN, result, shift_num);
            // Store the rotated row and fold it into the running state.
            memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t));
            for (j = 0; j < 32; ++j) {
                a[j] ^= result[j];
            }
        } else {
            // Reseed row: pick a one-way function indexed by a 4-bit
            // digest t of the state, re-hash the rotated state, then
            // derive four 48-bit PRNG seeds from the new state.
            uint8_t t = 0, shift_num = 0;
            reduce_bit(a, 32, (uint8_t *)&t, 8);
            t = (t & 0x0f) ^ (t >> 4);
            reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
            uint8_t a_rrs[INPUT_LEN];
            rrs(a, OUTPUT_LEN, a_rrs, shift_num);
            funcInfor[t].func(a_rrs, 32, a);
            reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
            reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
            reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
            reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
            my_seed48_r(randSeed[0], &randBuffer[0]);
            my_seed48_r(randSeed[1], &randBuffer[1]);
            my_seed48_r(randSeed[2], &randBuffer[2]);
            my_seed48_r(randSeed[3], &randBuffer[3]);
#else
            vseed48(randSeed , &randBuffer[0]);
            vseed48(randSeed + 2, &randBuffer[1]);
#endif
            memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));
        }
    }
}

/*
 * Step 2: Modify the working memory contents.
 *
 * Performs C rounds of pseudo-random, data-dependent swaps over 'Maddr'.
 * Each round seeds a PRNG from the current state, then does L << 6
 * swap steps: two addresses derived from (PRNG output + running value r)
 * are XOR-swapped with a state byte, and their XOR is collected into a
 * 64-byte scratch block b.  After each round, b is compressed back into
 * the state and XOR-folded into 'result'.
 *
 * Maddr:  working memory, modified in place.
 * L:      controls swap steps per round (L << 6 steps).
 * C:      number of rounds.
 * result: OUTPUT_LEN-byte digest accumulated across rounds.
 */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C, uint8_t *result)
{
    uint32_t i, j;
    uint8_t a[OUTPUT_LEN], b[64];
    // Initial state: hash of the last 32 bytes of working memory.
    funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);
    memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));
    uint64_t r = 0;
    reduce_bit(a, 32, (uint8_t *)&r, 64);
    const uint32_t iterNum = L << 6;
    for (i = 0; i < C; ++i) {
        // Seed a fresh 48-bit PRNG stream from the current state.
        uint64_t randSeed = 0;
        reduce_bit(a, 32, (uint8_t *)&randSeed, 48);
        struct my_rand48_data randBuffer;
        my_seed48_r(randSeed, &randBuffer);
        uint8_t t1, t2, s;
        uint64_t randNum = 0, base = 0;
        for (j = 0; j < iterNum; ++j) {
            my_rand48_r(&randBuffer, &randNum);
            base = randNum + r;
            // Data-dependent offset in [1, 0xff01), forcing memory-hard
            // access patterns that depend on prior swaps via r.
            uint64_t offset = 0;
            reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8);
            offset = (offset << 8) + 1;
            uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
            uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
            // XOR-swap the two bytes with a state byte s.
            t1 = Maddr[addr1];
            t2 = Maddr[addr2];
            s = a[j & 0x1f];
            Maddr[addr1] = t2 ^ s;
            Maddr[addr2] = t1 ^ s;
            b[j & 0x3f] = t1 ^ t2;
            r = r + s + t1 + t2;
        }
        // Compress the scratch block back into the state with a one-way
        // function chosen by a 4-bit digest of r, then fold into result.
        uint8_t t = 0;
        reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        reduce_bit(b, 64, a, 256);
        uint8_t shift_num = 0;
        uint64_t ir = r + i;
        reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8);
        // NOTE(review): as in initWorkMemory, rrs() writes OUTPUT_LEN
        // bytes into the INPUT_LEN-sized a_rrs -- assumes
        // INPUT_LEN >= OUTPUT_LEN; TODO confirm.
        uint8_t a_rrs[INPUT_LEN];
        rrs(a, OUTPUT_LEN, a_rrs, shift_num);
        funcInfor[t].func(a_rrs, 32, a);
        for (j = 0; j < OUTPUT_LEN; ++j) {
            result[j] ^= a[j];
        }
    }
}

/*
 * Step 3: Calculate the final result.
*/ void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) { uint32_t i = 0, j = 0, k = 0; memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t)); const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1; uint32_t it = 0; uint8_t result_rrs[OUTPUT_LEN]; while(1) { uint8_t t = 0, shift_num = 0; uint32_t d = 0; reduce_bit(result, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(result, 32, (uint8_t *)&d, D); ++d; for (j = 0; j < d; ++j) { uint32_t index = i << 5; for (k = 0; k < 32; ++k) { result[k] ^= Maddr[index + k]; } ++i; if (i == num) { it = i + t; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[0].func(result_rrs, 32, result); return; } } it = t + i; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[t].func(result_rrs, 32, result); } } /* * Correctness & Performance test for Proof of work */ void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) { int64_t j; uint32_t inputLen = messLen; uint8_t input[INPUT_LEN], output[OUTPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, messLen*sizeof(char)); // Init all one-way function initOneWayFunction(); uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); printf("****************************** Correctness test (PoW function) ******************************\n"); printf("Test message: %s\n", mess); powFunction(input, inputLen, Maddr, output); view_data_u8("PoW", output, OUTPUT_LEN); printf("*********************************************************************************************\n"); /* printf("*************************************************** Performance test (PoW function) ***************************************************\n"); uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t)); 
assert(NULL != result); memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t)); uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64}; uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t); printf(" %-18s", "Algorithm"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) printf("%12d", threadNumArr[ix]); printf("\n"); printf("00 %-18s\t", "PoW"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) { omp_set_num_threads(threadNumArr[ix]); double startTime = get_wall_time(); if (threadNumArr[ix] == 1) { for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN); } } else { #pragma omp parallel for firstprivate(input), private(j) shared(result) for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN); } } double endTime = get_wall_time(); double costTime = endTime - startTime; printf("%5.0f bps ", iterNum / costTime); fflush(stdout); // Check result for (j = 0; j < iterNum; j += 1) { if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) { printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j); view_data_u8("output", output, OUTPUT_LEN); view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN); abort(); } } } printf("\n"); printf("***************************************************************************************************************************************\n"); if (NULL != result) { free(result); result = NULL; } */ if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } #define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL) #define MAX_TEST_INPUT_LEN 140 #define MAX_OUT_FILE_NAME_LEN 25 const char testInputCase[][MAX_TEST_INPUT_LEN] = { "", "HelloWorld", "0123456789" }; void powNistTest(const char *outFileName) { const uint64_t iterNum = 1024UL * 1024UL; // const uint64_t iterNum = 1024UL; uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); assert(NULL != outputBuffer); memset(outputBuffer, 0, 
OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); initOneWayFunction(); uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]); for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) { char curOutFileName[MAX_OUT_FILE_NAME_LEN] = ""; sprintf(curOutFileName, "%s-%u.txt", outFileName, testCaseIx); FILE *fp = NULL; if (NULL != (fp = fopen(curOutFileName, "wb"))) { const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]); uint8_t input[MAX_TEST_INPUT_LEN]; memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t)); memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t)); double startTime = get_wall_time(); powFunction(input, testInputCaseLen, Maddr, outputBuffer); for (uint64_t i = 1, j = 0; i < iterNum; ++i) { memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint32_t)); j += OUTPUT_LEN; powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j); /* if (j == OUTPUT_BUFFER_SIZE) { fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); j = 0; } */ } double endTime = get_wall_time(); double costTime = endTime - startTime; fprintf(stdout, "TestCaseIx: %d, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \ testInputCase[testCaseIx], iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout); fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); fclose(fp); } else { fprintf(stderr, "Error: Open %s failed!\n", curOutFileName); abort(); } } if (NULL != outputBuffer) { free(outputBuffer); outputBuffer = NULL; } if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } void helloHash(const uint8_t *mess, uint32_t messLen, uint8_t output[OUTPUT_LEN]) { if(messLen != INPUT_LEN) { //won't get in printf("helloHash:Invalid message length %d\n", messLen); return; } 
int64_t j; uint32_t inputLen =messLen; uint8_t input[INPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, inputLen*sizeof(char)); //operation: input uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); //1024*1024*1 assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); //printf("Test message: %s\n", mess); powFunction(input, inputLen,Maddr, output); //view_data_u8("PoW", output, OUTPUT_LEN); //output if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } int my_rand64_r (struct my_rand48_data *buffer, uint64_t *result) { uint64_t X = buffer->__x; X = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = X; buffer->__x = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; X ^= buffer->__x << 16; *result = X; return 0; } int my_seed48_r (uint64_t seedval, struct my_rand48_data *buffer) { buffer->__x = seedval & 0xffffffffffffULL; buffer->__a = 0x5deece66dULL; buffer->__c = 0xb; return 0; } void powFunction(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, uint8_t *output) { uint8_t c[OUTPUT_LEN]; // Step 1: Initialize working memory. initWorkMemory(input, inputLen, Maddr, 128); // view_data_u8("Maddr", Maddr, OUTPUT_LEN); // Step 2: Modify the working memory contents. modifyWorkMemory(Maddr, 4, WORK_MEMORY_SIZE >> 11, c); // view_data_u8("c", c, OUTPUT_LEN); // Step 3: Calculate the final result. calculateFinalResult(Maddr, c, 8, output); // view_data_u8("output", output, OUTPUT_LEN); } int my_rand48_r (struct my_rand48_data *buffer, uint64_t *result) { *result = (buffer->__x * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = *result; return 0; }
dft.c
// Copyright Naoki Shibata and contributors 2010 - 2020. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <assert.h> #include <signal.h> #include <setjmp.h> #if defined(POWER64_UNDEF_USE_EXTERN_INLINES) // This is a workaround required to cross compile for PPC64 binaries #include <features.h> #ifdef __USE_EXTERN_INLINES #undef __USE_EXTERN_INLINES #endif #endif #include <math.h> #include "sleef.h" #include "misc.h" #include "common.h" #include "arraymap.h" #include "dftcommon.h" #ifdef _OPENMP #include <omp.h> #endif #if BASETYPEID == 1 typedef double real; typedef Sleef_double2 sc_t; #define BASETYPESTRING "double" #define MAGIC 0x27182818 #define MAGIC2D 0x17320508 #define INIT SleefDFT_double_init1d #define EXECUTE SleefDFT_double_execute #define INIT2D SleefDFT_double_init2d #define CTBL ctbl_double #define REALSUB0 realSub0_double #define REALSUB1 realSub1_double #define GETINT getInt_double #define GETPTR getPtr_double #define DFTF dftf_double #define DFTB dftb_double #define TBUTF tbutf_double #define TBUTB tbutb_double #define BUTF butf_double #define BUTB butb_double #define SINCOSPI Sleef_sincospi_u05 #include "dispatchdp.h" #elif BASETYPEID == 2 typedef float real; typedef Sleef_float2 sc_t; #define BASETYPESTRING "float" #define MAGIC 0x31415926 #define MAGIC2D 0x22360679 #define INIT SleefDFT_float_init1d #define EXECUTE SleefDFT_float_execute #define INIT2D SleefDFT_float_init2d #define CTBL ctbl_float #define REALSUB0 realSub0_float #define REALSUB1 realSub1_float #define GETINT getInt_float #define GETPTR getPtr_float #define DFTF dftf_float #define DFTB dftb_float #define TBUTF tbutf_float #define TBUTB tbutb_float #define BUTF butf_float #define BUTB butb_float #define SINCOSPI Sleef_sincospif_u05 #include "dispatchsp.h" #elif BASETYPEID == 3 typedef long 
double real; typedef Sleef_longdouble2 sc_t; #define BASETYPESTRING "long double" #define MAGIC 0x14142135 #define MAGIC2D 0x26457513 #define INIT SleefDFT_longdouble_init1d #define EXECUTE SleefDFT_longdouble_execute #define INIT2D SleefDFT_longdouble_init2d #define CTBL ctbl_longdouble #define REALSUB0 realSub0_longdouble #define REALSUB1 realSub1_longdouble #define GETINT getInt_longdouble #define GETPTR getPtr_longdouble #define DFTF dftf_longdouble #define DFTB dftb_longdouble #define TBUTF tbutf_longdouble #define TBUTB tbutb_longdouble #define BUTF butf_longdouble #define BUTB butb_longdouble #define SINCOSPI Sleef_sincospil_u05 #include "dispatchld.h" #elif BASETYPEID == 4 typedef Sleef_quad real; typedef Sleef_quad2 sc_t; #define BASETYPESTRING "Sleef_quad" #define MAGIC 0x33166247 #define MAGIC2D 0x36055512 #define INIT SleefDFT_quad_init1d #define EXECUTE SleefDFT_quad_execute #define INIT2D SleefDFT_quad_init2d #define CTBL ctbl_Sleef_quad #define REALSUB0 realSub0_Sleef_quad #define REALSUB1 realSub1_Sleef_quad #define GETINT getInt_Sleef_quad #define GETPTR getPtr_Sleef_quad #define DFTF dftf_Sleef_quad #define DFTB dftb_Sleef_quad #define TBUTF tbutf_Sleef_quad #define TBUTB tbutb_Sleef_quad #define BUTF butf_Sleef_quad #define BUTB butb_Sleef_quad #define SINCOSPI Sleef_sincospiq_u05 #include "dispatchqp.h" #else #error No BASETYPEID specified #endif #define IMPORT_IS_EXPORT #include "sleefdft.h" // #if BASETYPEID == 4 real CTBL[] = { 0.7071067811865475243818940365159164684883Q, -0.7071067811865475243818940365159164684883Q, 0.9238795325112867561014214079495587839119Q, -0.382683432365089771723257530688933059082Q, 0.382683432365089771723257530688933059082Q, -0.9238795325112867561014214079495587839119Q, #if MAXBUTWIDTH >= 5 0.9807852804032304491190993878113602022495Q, -0.1950903220161282678433729148581576851029Q, 0.5555702330196022247573058028269343822103Q, -0.8314696123025452370808655033762590846891Q, 0.8314696123025452370808655033762590846891Q, 
-0.5555702330196022247573058028269343822103Q, 0.1950903220161282678433729148581576851029Q, -0.9807852804032304491190993878113602022495Q, #endif #if MAXBUTWIDTH >= 6 0.9951847266721968862310254699821143731242Q, -0.09801714032956060199569840382660679267701Q, 0.6343932841636454982026105398063009488396Q, -0.7730104533627369607965383602188325085081Q, 0.881921264348355029715105513066220055407Q, -0.4713967368259976485449225247492677226546Q, 0.2902846772544623676448431737195932100803Q, -0.9569403357322088649310892760624369657307Q, 0.9569403357322088649310892760624369657307Q, -0.2902846772544623676448431737195932100803Q, 0.4713967368259976485449225247492677226546Q, -0.881921264348355029715105513066220055407Q, 0.7730104533627369607965383602188325085081Q, -0.6343932841636454982026105398063009488396Q, 0.09801714032956060199569840382660679267701Q, -0.9951847266721968862310254699821143731242Q, #endif #if MAXBUTWIDTH >= 7 0.9987954562051723927007702841240899260811Q, -0.04906767432741801425355085940205324135377Q, 0.6715589548470184006194634573905233310143Q, -0.7409511253549590911932944126139233276263Q, 0.9039892931234433315823215138173907234886Q, -0.427555093430282094315230886905077056781Q, 0.336889853392220050702686798271834334173Q, -0.9415440651830207783906830087961026265475Q, 0.9700312531945439926159106824865574481009Q, -0.2429801799032638899447731489766866275204Q, 0.5141027441932217266072797923204262815489Q, -0.8577286100002720698929313536407192941624Q, 0.8032075314806449097991200569701675249235Q, -0.5956993044924333434615715265891822127742Q, 0.1467304744553617516588479505190711904561Q, -0.9891765099647809734561415551112872890371Q, 0.9891765099647809734561415551112872890371Q, -0.1467304744553617516588479505190711904561Q, 0.5956993044924333434615715265891822127742Q, -0.8032075314806449097991200569701675249235Q, 0.8577286100002720698929313536407192941624Q, -0.5141027441932217266072797923204262815489Q, 0.2429801799032638899447731489766866275204Q, 
-0.9700312531945439926159106824865574481009Q, 0.9415440651830207783906830087961026265475Q, -0.336889853392220050702686798271834334173Q, 0.427555093430282094315230886905077056781Q, -0.9039892931234433315823215138173907234886Q, 0.7409511253549590911932944126139233276263Q, -0.6715589548470184006194634573905233310143Q, 0.04906767432741801425355085940205324135377Q, -0.9987954562051723927007702841240899260811Q, #endif }; #else real CTBL[] = { 0.7071067811865475243818940365159164684883L, -0.7071067811865475243818940365159164684883L, 0.9238795325112867561014214079495587839119L, -0.382683432365089771723257530688933059082L, 0.382683432365089771723257530688933059082L, -0.9238795325112867561014214079495587839119L, #if MAXBUTWIDTH >= 5 0.9807852804032304491190993878113602022495L, -0.1950903220161282678433729148581576851029L, 0.5555702330196022247573058028269343822103L, -0.8314696123025452370808655033762590846891L, 0.8314696123025452370808655033762590846891L, -0.5555702330196022247573058028269343822103L, 0.1950903220161282678433729148581576851029L, -0.9807852804032304491190993878113602022495L, #endif #if MAXBUTWIDTH >= 6 0.9951847266721968862310254699821143731242L, -0.09801714032956060199569840382660679267701L, 0.6343932841636454982026105398063009488396L, -0.7730104533627369607965383602188325085081L, 0.881921264348355029715105513066220055407L, -0.4713967368259976485449225247492677226546L, 0.2902846772544623676448431737195932100803L, -0.9569403357322088649310892760624369657307L, 0.9569403357322088649310892760624369657307L, -0.2902846772544623676448431737195932100803L, 0.4713967368259976485449225247492677226546L, -0.881921264348355029715105513066220055407L, 0.7730104533627369607965383602188325085081L, -0.6343932841636454982026105398063009488396L, 0.09801714032956060199569840382660679267701L, -0.9951847266721968862310254699821143731242L, #endif #if MAXBUTWIDTH >= 7 0.9987954562051723927007702841240899260811L, -0.04906767432741801425355085940205324135377L, 
0.6715589548470184006194634573905233310143L, -0.7409511253549590911932944126139233276263L, 0.9039892931234433315823215138173907234886L, -0.427555093430282094315230886905077056781L, 0.336889853392220050702686798271834334173L, -0.9415440651830207783906830087961026265475L, 0.9700312531945439926159106824865574481009L, -0.2429801799032638899447731489766866275204L, 0.5141027441932217266072797923204262815489L, -0.8577286100002720698929313536407192941624L, 0.8032075314806449097991200569701675249235L, -0.5956993044924333434615715265891822127742L, 0.1467304744553617516588479505190711904561L, -0.9891765099647809734561415551112872890371L, 0.9891765099647809734561415551112872890371L, -0.1467304744553617516588479505190711904561L, 0.5956993044924333434615715265891822127742L, -0.8032075314806449097991200569701675249235L, 0.8577286100002720698929313536407192941624L, -0.5141027441932217266072797923204262815489L, 0.2429801799032638899447731489766866275204L, -0.9700312531945439926159106824865574481009L, 0.9415440651830207783906830087961026265475L, -0.336889853392220050702686798271834334173L, 0.427555093430282094315230886905077056781L, -0.9039892931234433315823215138173907234886L, 0.7409511253549590911932944126139233276263L, -0.6715589548470184006194634573905233310143L, 0.04906767432741801425355085940205324135377L, -0.9987954562051723927007702841240899260811L, #endif }; #endif #ifndef ENABLE_STREAM #error ENABLE_STREAM not defined #endif static const int constK[] = { 0, 2, 6, 14, 38, 94, 230, 542, 1254 }; extern const char *configStr[]; extern int planFilePathSet; // Utility functions static jmp_buf sigjmp; static void sighandler(int signum) { longjmp(sigjmp, 1); } static int checkISAAvailability(int isa) { signal(SIGILL, sighandler); if (setjmp(sigjmp) == 0) { int ret = GETINT[isa] != NULL && (*GETINT[isa])(BASETYPEID); signal(SIGILL, SIG_DFL); return ret; } signal(SIGILL, SIG_DFL); return 0; } #ifdef _OPENMP static int omp_thread_count() { int n = 0; #pragma omp parallel 
reduction(+:n) n += 1; return n; } #endif static void startAllThreads(const int nth) { #ifdef _OPENMP volatile int8_t *state = calloc(nth, 1); int th; #pragma omp parallel for for(th=0;th<nth;th++) { state[th] = 1; for(;;) { int i; for(i=0;i<nth;i++) if (state[i] == 0) break; if (i == nth) break; } } free((void *)state); #endif } // Dispatcher static void dispatch(SleefDFT *p, const int N, real *d, const real *s, const int level, const int config) { const int K = constK[N], log2len = p->log2len; if (level == N) { if ((p->mode & SLEEF_MODE_BACKWARD) == 0) { void (*func)(real *, const real *, const int) = DFTF[config][p->isa][N]; (*func)(d, s, log2len-N); } else { void (*func)(real *, const real *, const int) = DFTB[config][p->isa][N]; (*func)(d, s, log2len-N); } } else if (level == log2len) { assert(p->vecwidth <= (1 << N)); if ((p->mode & SLEEF_MODE_BACKWARD) == 0) { void (*func)(real *, uint32_t *, const real *, const int, const real *, const int) = TBUTF[config][p->isa][N]; (*func)(d, p->perm[level], s, log2len-N, p->tbl[N][level], K); } else { void (*func)(real *, uint32_t *, const real *, const int, const real *, const int) = TBUTB[config][p->isa][N]; (*func)(d, p->perm[level], s, log2len-N, p->tbl[N][level], K); } } else { if ((p->mode & SLEEF_MODE_BACKWARD) == 0) { void (*func)(real *, uint32_t *, const int, const real *, const int, const real *, const int) = BUTF[config][p->isa][N]; (*func)(d, p->perm[level], log2len-level, s, log2len-N, p->tbl[N][level], K); } else { void (*func)(real *, uint32_t *, const int, const real *, const int, const real *, const int) = BUTB[config][p->isa][N]; (*func)(d, p->perm[level], log2len-level, s, log2len-N, p->tbl[N][level], K); } } } // Transposer #if defined(__GNUC__) && __GNUC__ < 5 // This is another workaround of a bug in gcc-4 #define LOG2BS 3 #else #define LOG2BS 4 #endif #define BS (1 << LOG2BS) #define TRANSPOSE_BLOCK(y2) do { \ for(int x2=y2+1;x2<BS;x2++) { \ element_t r = *(element_t *)&row[y2].r[x2*2+0]; \ 
*(element_t *)&row[y2].r[x2*2+0] = *(element_t *)&row[x2].r[y2*2+0]; \ *(element_t *)&row[x2].r[y2*2+0] = r; \ }} while(0) static void transpose(real *RESTRICT ALIGNED(256) d, real *RESTRICT ALIGNED(256) s, const int log2n, const int log2m) { if (log2n < LOG2BS || log2m < LOG2BS) { for(int y=0;y<(1 << log2n);y++) { for(int x=0;x<(1 << log2m);x++) { real r0 = s[((y << log2m)+x)*2+0]; real r1 = s[((y << log2m)+x)*2+1]; d[((x << log2n)+y)*2+0] = r0; d[((x << log2n)+y)*2+1] = r1; } } } else { #if defined(__GNUC__) && !defined(__clang__) typedef struct { real __attribute__((vector_size(sizeof(real)*BS*2))) r; } row_t; typedef struct { real __attribute__((vector_size(sizeof(real)*2))) r; } element_t; #else typedef struct { real r[BS*2]; } row_t; typedef struct { real r0, r1; } element_t; #endif for(int y=0;y<(1 << log2n);y+=BS) { for(int x=0;x<(1 << log2m);x+=BS) { row_t row[BS]; for(int y2=0;y2<BS;y2++) { row[y2] = *(row_t *)&s[(((y+y2) << log2m)+x)*2]; } #if LOG2BS == 4 TRANSPOSE_BLOCK( 0); TRANSPOSE_BLOCK( 1); TRANSPOSE_BLOCK( 2); TRANSPOSE_BLOCK( 3); TRANSPOSE_BLOCK( 4); TRANSPOSE_BLOCK( 5); TRANSPOSE_BLOCK( 6); TRANSPOSE_BLOCK( 7); TRANSPOSE_BLOCK( 8); TRANSPOSE_BLOCK( 9); TRANSPOSE_BLOCK(10); TRANSPOSE_BLOCK(11); TRANSPOSE_BLOCK(12); TRANSPOSE_BLOCK(13); TRANSPOSE_BLOCK(14); TRANSPOSE_BLOCK(15); #else for(int y2=0;y2<BS;y2++) { for(int x2=y2+1;x2<BS;x2++) { element_t r = *(element_t *)&row[y2].r[x2*2+0]; *(element_t *)&row[y2].r[x2*2+0] = *(element_t *)&row[x2].r[y2*2+0]; *(element_t *)&row[x2].r[y2*2+0] = r; } } #endif for(int y2=0;y2<BS;y2++) { *(row_t *)&d[(((x+y2) << log2n)+y)*2] = row[y2]; } } } } } #ifdef _OPENMP static void transposeMT(real *RESTRICT ALIGNED(256) d, real *RESTRICT ALIGNED(256) s, int log2n, int log2m) { if (log2n < LOG2BS || log2m < LOG2BS) { for(int y=0;y<(1 << log2n);y++) { for(int x=0;x<(1 << log2m);x++) { real r0 = s[((y << log2m)+x)*2+0]; real r1 = s[((y << log2m)+x)*2+1]; d[((x << log2n)+y)*2+0] = r0; d[((x << log2n)+y)*2+1] = r1; } } 
} else { #if defined(__GNUC__) && !defined(__clang__) typedef struct { real __attribute__((vector_size(sizeof(real)*BS*2))) r; } row_t; typedef struct { real __attribute__((vector_size(sizeof(real)*2))) r; } element_t; #else typedef struct { real r[BS*2]; } row_t; typedef struct { real r0, r1; } element_t; #endif int y; #pragma omp parallel for for(y=0;y<(1 << log2n);y+=BS) { for(int x=0;x<(1 << log2m);x+=BS) { row_t row[BS]; for(int y2=0;y2<BS;y2++) { row[y2] = *(row_t *)&s[(((y+y2) << log2m)+x)*2]; } #if LOG2BS == 4 TRANSPOSE_BLOCK( 0); TRANSPOSE_BLOCK( 1); TRANSPOSE_BLOCK( 2); TRANSPOSE_BLOCK( 3); TRANSPOSE_BLOCK( 4); TRANSPOSE_BLOCK( 5); TRANSPOSE_BLOCK( 6); TRANSPOSE_BLOCK( 7); TRANSPOSE_BLOCK( 8); TRANSPOSE_BLOCK( 9); TRANSPOSE_BLOCK(10); TRANSPOSE_BLOCK(11); TRANSPOSE_BLOCK(12); TRANSPOSE_BLOCK(13); TRANSPOSE_BLOCK(14); TRANSPOSE_BLOCK(15); #else for(int y2=0;y2<BS;y2++) { for(int x2=y2+1;x2<BS;x2++) { element_t r = *(element_t *)&row[y2].r[x2*2+0]; *(element_t *)&row[y2].r[x2*2+0] = *(element_t *)&row[x2].r[y2*2+0]; *(element_t *)&row[x2].r[y2*2+0] = r; } } #endif for(int y2=0;y2<BS;y2++) { *(row_t *)&d[(((x+y2) << log2n)+y)*2] = row[y2]; } } } } } #endif // #ifdef _OPENMP // Table generator static sc_t r2coefsc(int i, int log2len, int level) { return SINCOSPI((i & ((-1 << (log2len - level)) & ~(-1 << log2len))) * ((real)1.0/(1 << (log2len-1)))); } static sc_t srcoefsc(int i, int log2len, int level) { return SINCOSPI(((3*(i & (-1 << (log2len - level)))) & ~(-1 << log2len)) * ((real)1.0/(1 << (log2len-1)))); } static int makeTableRecurse(real *x, int *p, const int log2len, const int levelorg, const int levelinc, const int sign, const int top, const int bot, const int N, int cnt) { if (levelinc >= N-1) return cnt; const int level = levelorg - levelinc; if (bot - top > 4) { const int bl = 1 << (N - levelinc); const int w = bl/4; for(int j=0;j<(bot-top)/bl;j++) { for(int i=0;i<w;i++) { int a = sign*(p[(levelinc << N) + top+bl*j+i] & (-1 << (log2len - level))); 
sc_t sc; sc = r2coefsc(a, log2len, level); x[cnt++] = -sc.x; x[cnt++] = -sc.y; sc = srcoefsc(a, log2len, level); x[cnt++] = -sc.x; x[cnt++] = -sc.y; } cnt = makeTableRecurse(x, p, log2len, levelorg, levelinc+1, sign, top+bl*j , top+bl*j + bl/2, N, cnt); cnt = makeTableRecurse(x, p, log2len, levelorg, levelinc+2, sign, top+bl*j + bl/2, top+bl*j + bl , N, cnt); } } else if (bot - top == 4) { int a = sign*(p[(levelinc << N) + top] & (-1 << (log2len - level))); sc_t sc; sc = r2coefsc(a, log2len, level); x[cnt++] = -sc.x; x[cnt++] = -sc.y; sc = srcoefsc(a, log2len, level); x[cnt++] = -sc.x; x[cnt++] = -sc.y; } return cnt; } static uint32_t perm(int nbits, uint32_t k, int s, int d) { s = MIN(MAX(s, 0), nbits); d = MIN(MAX(d, 0), nbits); uint32_t r; r = (((k & 0xaaaaaaaa) >> 1) | ((k & 0x55555555) << 1)); r = (((r & 0xcccccccc) >> 2) | ((r & 0x33333333) << 2)); r = (((r & 0xf0f0f0f0) >> 4) | ((r & 0x0f0f0f0f) << 4)); r = (((r & 0xff00ff00) >> 8) | ((r & 0x00ff00ff) << 8)); r = ((r >> 16) | (r << 16)) >> (32-nbits); return (((r << s) | (k & ~(-1 << s))) & ~(-1 << d)) | ((((k >> s) | (r & (-1 << (nbits-s)))) << d) & ~(-1 << nbits)); } static real **makeTable(int sign, int vecwidth, int log2len, const int N, const int K) { if (log2len < N) return NULL; int *p = (int *)malloc(sizeof(int)*((N+1)<<N)); real **tbl = (real **)calloc(sizeof(real *), (log2len+1)); for(int level=N;level<=log2len;level++) { if (level == log2len && (1 << (log2len-N)) < vecwidth) { tbl[level] = NULL; continue; } int tblOffset = 0; tbl[level] = (real *)Sleef_malloc(sizeof(real) * (K << (level-N))); for(int i0=0;i0 < (1 << (log2len-N));i0+=(1 << (log2len - level))) { for(int j=0;j<N+1;j++) { for(int i=0;i<(1 << N);i++) { p[(j << N) + i] = perm(log2len, i0 + (i << (log2len-N)), log2len-level, log2len-(level-j)); } } int a = -sign*(p[((N-1) << N) + 0] & (-1 << (log2len - level))); sc_t sc = r2coefsc(a, log2len, level-N+1); tbl[level][tblOffset++] = sc.y; tbl[level][tblOffset++] = sc.x; tblOffset = 
makeTableRecurse(tbl[level], p, log2len, level, 0, sign, 0, 1 << N, N, tblOffset); } if (level == log2len) { real *atbl = (real *)Sleef_malloc(sizeof(real)*(K << (log2len-N))*2); tblOffset = 0; while(tblOffset < (K << (log2len-N))) { for(int k=0;k < K;k++) { for(int v = 0;v < vecwidth;v++) { assert((tblOffset + k * vecwidth + v)*2 + 1 < (K << (log2len-N))*2); atbl[(tblOffset + k * vecwidth + v)*2 + 0] = tbl[log2len][tblOffset + v * K + k]; atbl[(tblOffset + k * vecwidth + v)*2 + 1] = tbl[log2len][tblOffset + v * K + k]; } } tblOffset += K * vecwidth; } Sleef_free(tbl[log2len]); tbl[log2len] = atbl; } } free(p); return tbl; } // Random planner (for debugging) static int searchForRandomPathRecurse(SleefDFT *p, int level, int *path, int *pathConfig, uint64_t tm, int nTrial) { if (level == 0) { p->bestTime = tm; for(uint32_t j = 0;j < p->log2len+1;j++) { p->bestPathConfig[j] = pathConfig[j]; p->bestPath[j] = path[j]; } return nTrial; } if (level < 1) return nTrial-1; for(int i=0;i<10;i++) { int N; do { N = 1 + rand() % MAXBUTWIDTH; } while(p->tm[0][level*(MAXBUTWIDTH+1)+N] >= 1ULL << 60); if (p->vecwidth > (1 << N) || N == p->log2len) continue; path[level] = N; for(;;) { pathConfig[level] = rand() % CONFIGMAX; #if ENABLE_STREAM == 0 pathConfig[level] &= ~1; #endif if ((p->mode2 & SLEEF_MODE2_MT1D) == 0 && (pathConfig[level] & CONFIG_MT) != 0) continue; break; } for(int j = level-1;j >= 0;j--) path[j] = 0; nTrial = searchForRandomPathRecurse(p, level - N, path, pathConfig, 0, nTrial); if (nTrial <= 0) break; if (p->bestTime < 1ULL << 60) break; } return nTrial - 1; } // Planner #define NSHORTESTPATHS 15 #define MAXPATHLEN (MAXLOG2LEN+1) #define POSMAX (CONFIGMAX * MAXLOG2LEN * (MAXBUTWIDTH+1)) static int cln2pos(int config, int level, int N) { return (config * MAXLOG2LEN + level) * MAXBUTWIDTH + N; } static int pos2config(int pos) { return pos == -1 ? -1 : ((pos - 1) / (MAXBUTWIDTH * MAXLOG2LEN)); } static int pos2level(int pos) { return pos == -1 ? 
-1 : (((pos - 1) / MAXBUTWIDTH) % MAXLOG2LEN); }

/* Decodes the butterfly width N (1..MAXBUTWIDTH) from an encoded node position;
 * -1 denotes the virtual source node (see cln2pos for the encoding). */
static int pos2N(int pos) { return pos == -1 ? -1 : ((pos - 1) % MAXBUTWIDTH + 1); }

/* State for the k-shortest-path search over (config, level, N) graph nodes,
 * used to pick the best butterfly decomposition of the transform. */
typedef struct {
  SleefDFT *p;
  int countu[POSMAX];                   // per-node count of accepted paths passing through it
  int path[NSHORTESTPATHS][MAXPATHLEN]; // the best complete paths found so far
  int pathLen[NSHORTESTPATHS];          // length of each entry in path[]
  uint64_t cost[NSHORTESTPATHS];        // total cost of each entry in path[]
  int nPaths;                           // number of entries used in path[]/pathLen[]/cost[]
  int *heap;                            // candidate paths: heapSize slots of MAXPATHLEN ints each
  int *heapLen;                         // length of each candidate path
  uint64_t *heapCost;                   // cost of each candidate path
  int heapSize, nPathsInHeap;
} ks_t;

/* Allocates and zero-initializes the search state with room for 10 candidate paths. */
static ks_t *ksInit(SleefDFT *p) {
  ks_t *q = calloc(1, sizeof(ks_t));
  q->p = p;
  q->heapSize = 10;
  q->heap = calloc(q->heapSize, sizeof(int)*MAXPATHLEN);
  q->heapCost = calloc(q->heapSize, sizeof(uint64_t));
  q->heapLen = calloc(q->heapSize, sizeof(int));
  return q;
}

/* Releases all memory owned by the search state. */
static void ksDispose(ks_t *q) {
  free(q->heapCost);
  free(q->heapLen);
  free(q->heap);
  free(q);
}

// returns the number of paths in the heap
static int ksSize(ks_t *q) { return q->nPathsInHeap; }

// adds a path to the heap
static void ksAddPath(ks_t *q, int *path, int pathLen, uint64_t cost) {
  assert(pathLen <= MAXPATHLEN);
  if (q->nPathsInHeap == q->heapSize) {
    // Grow all three parallel arrays together.
    // NOTE(review): realloc results are assigned back without a failure check;
    // on OOM the old pointers are lost and the next access dereferences NULL.
    q->heapSize *= 2;
    q->heap = realloc(q->heap, q->heapSize * sizeof(int)*MAXPATHLEN);
    q->heapCost = realloc(q->heapCost, q->heapSize * sizeof(uint64_t));
    q->heapLen = realloc(q->heapLen, q->heapSize * sizeof(int));
  }
  for(int i=0;i<pathLen;i++) q->heap[q->nPathsInHeap * MAXPATHLEN + i] = path[i];
  q->heapLen[q->nPathsInHeap] = pathLen;
  q->heapCost[q->nPathsInHeap] = cost;
  q->nPathsInHeap++;
}

// returns the cost of n-th paths in the heap
static uint64_t ksCost(ks_t *q, int n) {
  assert(0 <= n && n < q->nPathsInHeap);
  return q->heapCost[n];
}

// copies the n-th paths in the heap to path, returns its length
static int ksGetPath(ks_t *q, int *path, int n) {
  assert(0 <= n && n < q->nPathsInHeap);
  int len = q->heapLen[n];
  for(int i=0;i<len;i++) path[i] = q->heap[n * MAXPATHLEN + i];
  return len;
}

// removes the n-th paths in the heap
static void ksRemove(ks_t *q, int n) {
  assert(0 <= n && n < q->nPathsInHeap);
  // Compact the arrays by shifting every later entry down one slot (O(k*MAXPATHLEN)).
  for(int i=n;i<q->nPathsInHeap-1;i++) {
    int len = q->heapLen[i+1];
    assert(len <
MAXPATHLEN);
    for(int j=0;j<len;j++) q->heap[i * MAXPATHLEN + j] = q->heap[(i+1) * MAXPATHLEN + j];
    q->heapLen[i] = q->heapLen[i+1];
    q->heapCost[i] = q->heapCost[i+1];
  }
  q->nPathsInHeap--;
}

// returns the countu value at pos
static int ksCountu(ks_t *q, int pos) {
  assert(0 <= pos && pos < POSMAX);
  return q->countu[pos];
}

// set the countu value at pos to n
static void ksSetCountu(ks_t *q, int pos, int n) {
  assert(0 <= pos && pos < POSMAX);
  q->countu[pos] = n;
}

// adds a path as one of the best k paths, returns the number best paths
static int ksAddBestPath(ks_t *q, int *path, int pathLen, uint64_t cost) {
  assert(pathLen <= MAXPATHLEN);
  assert(q->nPaths < NSHORTESTPATHS);
  for(int i=0;i<pathLen;i++) q->path[q->nPaths][i] = path[i];
  q->pathLen[q->nPaths] = pathLen;
  q->cost[q->nPaths] = cost;
  q->nPaths++;
  return q->nPaths;
}

// returns if pos is a destination
static int ksIsDest(ks_t *q, int pos) { return pos2level(pos) == 0; }

// returns n-th adjacent nodes at pos.
static int ksAdjacent(ks_t *q, int pos, int n) {
  // Destination nodes (level 0) have no successors.
  if (pos != -1 && pos2level(pos) == 0) return -1;

  // Largest usable butterfly width, bounded by the transform size and vector width.
  int NMAX = MIN(MIN(q->p->log2len, MAXBUTWIDTH+1), q->p->log2len - q->p->log2vecwidth + 1);

  if (pos == -1) {
    // Virtual source: enumerate (N, MT-or-not) starting configurations.
    int N = n / 2 + MAX(q->p->log2vecwidth, 1);
    if (N >= NMAX) return -1;
    return cln2pos((n & 1) * CONFIG_MT, q->p->log2len, N);
  }

  // Interior node: keep the MT bit of the current config, descend by this node's N.
  int config = (pos2config(pos) & CONFIG_MT);
  int N = n + 1;
  int level = pos2level(pos) - pos2N(pos);
  if (level < 0 || N >= NMAX) return -1;
  if (level == 0) return n == 0 ?
cln2pos(0, 0, 0) : -1; return cln2pos(config, level, N); } static uint64_t ksAdjacentCost(ks_t *q, int pos, int n) { int nxpos = ksAdjacent(q, pos, n); if (nxpos == -1) return 0; int config = pos2config(nxpos), level = pos2level(nxpos), N = pos2N(nxpos); uint64_t ret0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N]; uint64_t ret1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N]; return MIN(ret0, ret1); } static void searchForBestPath(SleefDFT *p) { ks_t *q = ksInit(p); for(int i=0;;i++) { int v = ksAdjacent(q, -1, i); if (v == -1) break; uint64_t c = ksAdjacentCost(q, -1, i); int path[1] = { v }; ksAddPath(q, path, 1, c); } while(ksSize(q) != 0) { uint64_t bestCost = 1ULL << 60; int bestPathNum = -1; for(int i=0;i<ksSize(q);i++) { if (ksCost(q, i) < bestCost) { bestCost = ksCost(q, i); bestPathNum = i; } } if (bestPathNum == -1) break; int path[MAXPATHLEN]; int pathLen = ksGetPath(q, path, bestPathNum); uint64_t cost = ksCost(q, bestPathNum); ksRemove(q, bestPathNum); int lastPos = path[pathLen-1]; if (ksCountu(q, lastPos) >= NSHORTESTPATHS) continue; ksSetCountu(q, lastPos, ksCountu(q, lastPos)+1); if (ksIsDest(q, lastPos)) { if (ksAddBestPath(q, path, pathLen, cost) >= NSHORTESTPATHS) break; continue; } for(int i=0;;i++) { int v = ksAdjacent(q, lastPos, i); if (v == -1) break; assert(0 <= pos2N(v) && pos2N(v) <= q->p->log2len); uint64_t c = ksAdjacentCost(q, lastPos, i); path[pathLen] = v; ksAddPath(q, path, pathLen+1, cost + c); } } for(int j = p->log2len;j >= 0;j--) p->bestPath[j] = 0; if (((p->mode & SLEEF_MODE_MEASURE) != 0 || (planFilePathSet && (p->mode & SLEEF_MODE_MEASUREBITS) == 0))) { uint64_t besttm = 1ULL << 62; int bestPath = -1; const int niter = 1 + 5000000 / ((1 << p->log2len) + 1); real *s2 = NULL, *d2 = NULL; const real *s = p->in == NULL ? (s2 = (real *)memset(Sleef_malloc((2 << p->log2len) * sizeof(real)), 0, sizeof(real) * (2 << p->log2len))) : p->in; real *d = p->out == NULL ? 
(d2 = (real *)memset(Sleef_malloc((2 << p->log2len) * sizeof(real)), 0, sizeof(real) * (2 << p->log2len))) : p->out; #ifdef _OPENMP const int tn = omp_get_thread_num(); #else const int tn = 0; #endif real *t[] = { p->x1[tn], p->x0[tn], d }; for(int mt=0;mt<2;mt++) { for(int i=q->nPaths-1;i>=0;i--) { if (((pos2config(q->path[i][0]) & CONFIG_MT) != 0) != mt) continue; if ((p->mode & SLEEF_MODE_VERBOSE) != 0) { for(int j=0;j<q->pathLen[i];j++) { int N = pos2N(q->path[i][j]); int level = pos2level(q->path[i][j]); int config = pos2config(q->path[i][j]) & ~1; uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N]; uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N]; config = t0 < t1 ? config : (config | 1); if (N != 0) printf("%d(%s) ", N, configStr[config]); } } if (mt) startAllThreads(p->nThread); uint64_t tm0 = Sleef_currentTimeMicros(); for(int k=0;k<niter;k++) { int nb = 0; const real *lb = s; if ((p->pathLen & 1) == 1) nb = -1; for(int level = p->log2len, j=0;level >= 1;j++) { assert(pos2level(q->path[i][j]) == level); int N = pos2N(q->path[i][j]); int config = pos2config(q->path[i][j]) & ~1; uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N]; uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N]; config = t0 < t1 ? config : (config | 1); dispatch(p, N, t[nb+1], lb, level, config); level -= N; lb = t[nb+1]; nb = (nb + 1) & 1; } } uint64_t tm1 = Sleef_currentTimeMicros(); for(int k=0;k<niter;k++) { int nb = 0; const real *lb = s; if ((p->pathLen & 1) == 1) nb = -1; for(int level = p->log2len, j=0;level >= 1;j++) { assert(pos2level(q->path[i][j]) == level); int N = pos2N(q->path[i][j]); int config = pos2config(q->path[i][j]) & ~1; uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N]; uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N]; config = t0 < t1 ? 
config : (config | 1); dispatch(p, N, t[nb+1], lb, level, config); level -= N; lb = t[nb+1]; nb = (nb + 1) & 1; } } uint64_t tm2 = Sleef_currentTimeMicros(); if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf(" : %lld %lld\n", (long long int)(tm1 - tm0), (long long int)(tm2 - tm1)); if ((tm1 - tm0) < besttm) { bestPath = i; besttm = tm1 - tm0; } if ((tm2 - tm1) < besttm) { bestPath = i; besttm = tm2 - tm1; } } } for(int level = p->log2len, j=0;level >= 1;j++) { assert(pos2level(q->path[bestPath][j]) == level); int N = pos2N(q->path[bestPath][j]); int config = pos2config(q->path[bestPath][j]) & ~1; uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N]; uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N]; config = t0 < t1 ? config : (config | 1); p->bestPath[level] = N; p->bestPathConfig[level] = config; level -= N; } if (d2 != NULL) Sleef_free(d2); if (s2 != NULL) Sleef_free(s2); } else { for(int level = p->log2len, j=0;level >= 1;j++) { int bestPath = 0; assert(pos2level(q->path[bestPath][j]) == level); int N = pos2N(q->path[bestPath][j]); int config = pos2config(q->path[bestPath][j]); p->bestPath[level] = N; p->bestPathConfig[level] = config; level -= N; } } ksDispose(q); } // static uint64_t estimate(int log2len, int level, int N, int config) { uint64_t ret = N * 1000 + ABS(N-3) * 1000; if (log2len >= 14 && (config & CONFIG_MT) != 0) ret /= 2; return ret; } static void measureBut(SleefDFT *p) { if (p->x0 == NULL) return; // #ifdef _OPENMP const int tn = omp_get_thread_num(); #else const int tn = 0; #endif real *s = (real *)memset(p->x0[tn], 0, sizeof(real) * (2 << p->log2len)); real *d = (real *)memset(p->x1[tn], 0, sizeof(real) * (2 << p->log2len)); const int niter = 1 + 100000 / ((1 << p->log2len) + 1); #define MEASURE_REPEAT 4 for(int rep=1;rep<=MEASURE_REPEAT;rep++) { for(int config=0;config<CONFIGMAX;config++) { #if ENABLE_STREAM == 0 if ((config & 1) != 0) continue; #endif if ((p->mode2 & SLEEF_MODE2_MT1D) == 0 && (config & CONFIG_MT) != 0) 
continue; for(uint32_t level = p->log2len;level >= 1;level--) { for(uint32_t N=1;N<=MAXBUTWIDTH;N++) { if (level < N || p->log2len <= N) continue; if (level == N) { if ((int)p->log2len - (int)level < p->log2vecwidth) continue; uint64_t tm = Sleef_currentTimeMicros(); for(int i=0;i<niter*2;i++) { dispatch(p, N, d, s, level, config); } tm = Sleef_currentTimeMicros() - tm + 1; p->tm[config][level*(MAXBUTWIDTH+1)+N] = MIN(p->tm[config][level*(MAXBUTWIDTH+1)+N], tm); } else if (level == p->log2len) { if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue; if (p->vecwidth > (1 << N)) continue; if ((config & CONFIG_MT) != 0) { int i1; #ifdef _OPENMP #pragma omp parallel for #endif for(i1=0;i1 < (1 << (p->log2len-N-p->log2vecwidth));i1++) { int i0 = i1 << p->log2vecwidth; p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N)); } } else { for(int i0=0, i1=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) { p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N)); } } uint64_t tm = Sleef_currentTimeMicros(); for(int i=0;i<niter;i++) { dispatch(p, N, d, s, level, config); dispatch(p, N, s, d, level, config); } tm = Sleef_currentTimeMicros() - tm + 1; p->tm[config][level*(MAXBUTWIDTH+1)+N] = MIN(p->tm[config][level*(MAXBUTWIDTH+1)+N], tm); } else { if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue; if (p->vecwidth > 2 && p->log2len <= N+2) continue; if ((int)p->log2len - (int)level < p->log2vecwidth) continue; if ((config & CONFIG_MT) != 0) { int i1; #ifdef _OPENMP #pragma omp parallel for #endif for(i1=0;i1 < (1 << (p->log2len-N-p->log2vecwidth));i1++) { int i0 = i1 << p->log2vecwidth; p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N)); } } else { for(int i0=0, i1=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) { p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N)); } } uint64_t tm = Sleef_currentTimeMicros(); for(int i=0;i<niter;i++) { 
/* --- tail of measureBut() --------------------------------------------
   Innermost timing loop: the butterfly is dispatched in both buffer
   directions (d<-s, then s<-d) so each iteration consumes the previous
   one's output, then the best (minimum) elapsed time is recorded for
   this (config, level, N) slot.  1ULL << 60 is the "not measured /
   not executable" sentinel used throughout the timing table. */
dispatch(p, N, d, s, level, config);
          dispatch(p, N, s, d, level, config);
        }
        tm = Sleef_currentTimeMicros() - tm + 1;
        p->tm[config][level*(MAXBUTWIDTH+1)+N] = MIN(p->tm[config][level*(MAXBUTWIDTH+1)+N], tm);
      }
    }
  }
}
}

  /* Verbose dump of the measured table.  Rows are labelled by where the
     butterfly sits in the plan: "bot" (level == N), "top"
     (level == log2len), "mid" (anything in between); entries still at
     the sentinel print as "N/A". */
  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    for(uint32_t level = p->log2len;level >= 1;level--) {
      for(uint32_t N=1;N<=MAXBUTWIDTH;N++) {
        if (level < N || p->log2len <= N) continue;
        if (level == N) {
          if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
          printf("bot %d, %d, %d, ", p->log2len, level, N);
          for(int config=0;config<CONFIGMAX;config++) {
            if (p->tm[config][level*(MAXBUTWIDTH+1)+N] == 1ULL << 60) {
              printf("N/A, ");
            } else {
              printf("%lld, ", (long long int)p->tm[config][level*(MAXBUTWIDTH+1)+N]);
            }
          }
          printf("\n");
        } else if (level == p->log2len) {
          if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
          if (p->vecwidth > (1 << N)) continue;
          printf("top %d, %d, %d, ", p->log2len, level, N);
          for(int config=0;config<CONFIGMAX;config++) {
            if (p->tm[config][level*(MAXBUTWIDTH+1)+N] == 1ULL << 60) {
              printf("N/A, ");
            } else {
              printf("%lld, ", (long long int)p->tm[config][level*(MAXBUTWIDTH+1)+N]);
            }
          }
          printf("\n");
        } else {
          if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
          if (p->vecwidth > 2 && p->log2len <= N+2) continue;
          if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
          printf("mid %d, %d, %d, ", p->log2len, level, N);
          for(int config=0;config<CONFIGMAX;config++) {
            if (p->tm[config][level*(MAXBUTWIDTH+1)+N] == 1ULL << 60) {
              printf("N/A, ");
            } else {
              printf("%lld, ", (long long int)p->tm[config][level*(MAXBUTWIDTH+1)+N]);
            }
          }
          printf("\n");
        }
      }
    }
  }
}

/* Fill p->tm with heuristic cost estimates instead of measured times.
   Mirrors the (level, N) loop structure and skip conditions of
   measureBut() so only executable combinations get an estimate; when
   streaming stores are compiled out (ENABLE_STREAM == 0) the odd
   config slots are skipped. */
static void estimateBut(SleefDFT *p) {
  for(uint32_t level = p->log2len;level >= 1;level--) {
    for(uint32_t N=1;N<=MAXBUTWIDTH;N++) {
      if (level < N || p->log2len <= N) continue;
      if (level == N) {
        /* Bottom butterfly (matches the "bot" rows of the verbose dump). */
        if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
        for(int config=0;config<CONFIGMAX;config++) {
#if ENABLE_STREAM == 0
          if ((config & 1) != 0) continue;
#endif
          p->tm[config][level*(MAXBUTWIDTH+1)+N] = estimate(p->log2len, level, N, config);
        }
      } else if (level == p->log2len) {
        /* Top butterfly. */
        if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
        if (p->vecwidth > (1 << N)) continue;
        for(int config=0;config<CONFIGMAX;config++) {
#if ENABLE_STREAM == 0
          if ((config & 1) != 0) continue;
#endif
          p->tm[config][level*(MAXBUTWIDTH+1)+N] = estimate(p->log2len, level, N, config);
        }
      } else {
        /* Intermediate butterfly. */
        if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
        if (p->vecwidth > 2 && p->log2len <= N+2) continue;
        if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
        for(int config=0;config<CONFIGMAX;config++) {
#if ENABLE_STREAM == 0
          if ((config & 1) != 0) continue;
#endif
          p->tm[config][level*(MAXBUTWIDTH+1)+N] = estimate(p->log2len, level, N, config);
        }
      }
    }
  }
}

/* Build the execution plan for transform p.
   Returns 1 on success, 0 when no executable path exists for the
   current ISA/vector width.  The plan is either loaded from a saved
   plan file, derived from measured timings (measureBut) or from
   analytic estimates (estimateBut); `randomize` (debug mode) selects a
   random valid path instead of the best one. */
static int measure(SleefDFT *p, int randomize) {
  /* Trivial length: fixed single-step path, nothing to measure. */
  if (p->log2len == 1) {
    p->bestTime = 1ULL << 60;
    p->pathLen = 1;
    p->bestPath[1] = 1;
    return 1;
  }

  /* Try to reuse a previously saved plan first. */
  if (PlanManager_loadMeasurementResultsP(p, (p->mode & SLEEF_MODE_NO_MT) != 0 ? 1 : 0)) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
      printf("Path(loaded) : ");
      for(int j = p->log2len;j >= 0;j--)
        if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
      printf("\n");
    }
    return 1;
  }

  int toBeSaved = 0;

  /* Reset the whole timing table to the "unmeasured" sentinel. */
  for(uint32_t level = p->log2len;level >= 1;level--) {
    for(uint32_t N=1;N<=MAXBUTWIDTH;N++) {
      for(int config=0;config<CONFIGMAX;config++) {
        p->tm[config][level*(MAXBUTWIDTH+1)+N] = 1ULL << 60;
      }
    }
  }

  /* Real measurement only when explicitly requested, or when a plan
     file is configured without the measure bits; otherwise estimate. */
  if (((p->mode & SLEEF_MODE_MEASURE) != 0 || (planFilePathSet && (p->mode & SLEEF_MODE_MEASUREBITS) == 0)) && !randomize) {
    measureBut(p);
    toBeSaved = 1;
  } else {
    estimateBut(p);
  }

  /* Executable only if at least one top-level butterfly has a finite
     time in config 0. */
  int executable = 0;
  for(int i=1;i<=MAXBUTWIDTH && !executable;i++) {
    if (p->tm[0][p->log2len*(MAXBUTWIDTH+1)+i] < (1ULL << 60)) executable = 1;
  }

  if (!executable) return 0;

  p->bestTime = 1ULL << 60;
  p->bestPath[p->log2len] = 0;

  if (!randomize) {
    searchForBestPath(p);
  } else {
    /* Debug mode: retry the randomized search until some valid path is
       found or the trial budget is exhausted. */
    int path[MAXLOG2LEN+1];
    int pathConfig[MAXLOG2LEN+1];
    for(int j = p->log2len;j >= 0;j--) path[j] = pathConfig[j] = 0;
    int nTrial = 100000;
    do {
      nTrial = searchForRandomPathRecurse(p, p->log2len, path, pathConfig, 0, nTrial);
    } while(p->bestTime == 1ULL << 60 && nTrial >= 0);
  }

  if (p->bestPath[p->log2len] == 0) return 0;

  /* Path length = number of non-zero steps in bestPath. */
  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;

  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    printf("Path");
    if (randomize) printf("(random) :");
    else if (toBeSaved) printf("(measured) :");
    else printf("(estimated) :");
    for(int j = p->log2len;j >= 0;j--)
      if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
    printf("\n");
  }

  /* Only measured (not estimated/random) plans are persisted. */
  if (toBeSaved) {
    PlanManager_saveMeasurementResultsP(p, (p->mode & SLEEF_MODE_NO_MT) != 0 ?
1 : 0);
  }

  return 1;
}

/* Decide whether the 2D transform should use the multi-threaded or the
   single-threaded transpose: reuse a saved measurement if available,
   otherwise either guess from the problem size or actually time both
   variants and persist the result. */
static void measureTranspose(SleefDFT *p) {
  if (PlanManager_loadMeasurementResultsT(p)) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose NoMT(loaded): %lld\n", (long long int)p->tmNoMT);
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose MT(loaded): %lld\n", (long long int)p->tmMT);
    return;
  }

  if ((p->mode & SLEEF_MODE_MEASURE) == 0 && (!planFilePathSet || (p->mode & SLEEF_MODE_MEASUREBITS) != 0)) {
    /* No measurement requested: pick by size heuristic.  The smaller
       of tmNoMT/tmMT wins later, so these constants only encode the
       preference, not real times. */
    if (p->log2hlen + p->log2vlen >= 14) {
      p->tmNoMT = 20;
      p->tmMT = 10;
      if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose : selected MT(estimated)\n");
    } else {
      p->tmNoMT = 10;
      p->tmMT = 20;
      if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose : selected NoMT(estimated)\n");
    }
    return;
  }

  /* Time both variants; iteration count is inversely proportional to
     the matrix size so total work stays roughly constant. */
  real *tBuf2 = (real *)Sleef_malloc(sizeof(real)*2*p->hlen*p->vlen);
  const int niter = 1 + 5000000 / (p->hlen * p->vlen + 1);
  uint64_t tm;

  tm = Sleef_currentTimeMicros();
  for(int i=0;i<niter;i++) {
    transpose(tBuf2, p->tBuf, p->log2hlen, p->log2vlen);
    transpose(tBuf2, p->tBuf, p->log2vlen, p->log2hlen);
  }
  p->tmNoMT = Sleef_currentTimeMicros() - tm + 1;

  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose NoMT(measured): %lld\n", (long long int)p->tmNoMT);

#ifdef _OPENMP
  tm = Sleef_currentTimeMicros();
  for(int i=0;i<niter;i++) {
    transposeMT(tBuf2, p->tBuf, p->log2hlen, p->log2vlen);
    transposeMT(tBuf2, p->tBuf, p->log2vlen, p->log2hlen);
  }
  p->tmMT = Sleef_currentTimeMicros() - tm + 1;

  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose MT(measured): %lld\n", (long long int)p->tmMT);
#else
  /* Without OpenMP, make MT look strictly worse so it is never chosen. */
  p->tmMT = p->tmNoMT*2;
#endif

  Sleef_free(tBuf2);

  PlanManager_saveMeasurementResultsT(p);
}

// Implementation of SleefDFT_*_init1d
/* Allocate and plan a 1D transform of length n.  Returns NULL when no
   suitable ISA is available; returns a partially-initialized handle
   for trivial lengths (log2len <= 1). */
EXPORT SleefDFT *INIT(uint32_t n, const real *in, real *out, uint64_t mode) {
  SleefDFT *p = (SleefDFT *)calloc(1, sizeof(SleefDFT));
  p->magic = MAGIC;
  p->baseTypeID = BASETYPEID;
  p->in = (const void *)in;
  p->out = (void *)out;

  // Mode
  p->mode = mode;
  if ((p->mode & SLEEF_MODE_NO_MT) == 0) {
    p->mode2 |= SLEEF_MODE2_MT1D;
  }

  /* A real transform of length n is carried on a complex transform of
     length n/2. */
  if ((mode & SLEEF_MODE_REAL) != 0) n /= 2;
  p->log2len = ilog2(n);

  if (p->log2len <= 1) return p;

  /* ALT flips the direction convention. */
  if ((mode & SLEEF_MODE_ALT) != 0) p->mode = mode = mode ^ SLEEF_MODE_BACKWARD;

#ifdef _OPENMP
  p->nThread = omp_thread_count();
#else
  p->nThread = 1;
  p->mode2 &= ~SLEEF_MODE2_MT1D;
#endif

  // ISA availability
  int bestPriority = -1;
  p->isa = -1;

  /* Pick the available ISA with the highest DFT priority whose squared
     vector width still fits the transform length. */
  for(int i=0;i<ISAMAX;i++) {
    if (checkISAAvailability(i) && bestPriority < (*GETINT[i])(GETINT_DFTPRIORITY) && n >= (*GETINT[i])(GETINT_VECWIDTH) * (*GETINT[i])(GETINT_VECWIDTH)) {
      bestPriority = (*GETINT[i])(GETINT_DFTPRIORITY);
      p->isa = i;
    }
  }

  if (p->isa == -1) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("ISA not available\n");
    p->magic = 0;
    free(p);
    return NULL;
  }

  // Tables
  /* NOTE(review): calloc arguments are (size, count) here, the reverse
     of the conventional (nmemb, size); the product is the same so the
     allocation is correct. */
  p->perm = (uint32_t **)calloc(sizeof(uint32_t *), p->log2len+1);
  for(int level = p->log2len;level >= 1;level--) {
    p->perm[level] = (uint32_t *)Sleef_malloc(sizeof(uint32_t) * ((1 << p->log2len) + 8));
  }

  /* Per-thread double-buffer scratch space. */
  p->x0 = malloc(sizeof(real *) * p->nThread);
  p->x1 = malloc(sizeof(real *) * p->nThread);
  for(int i=0;i<p->nThread;i++) {
    p->x0[i] = (real *)Sleef_malloc(sizeof(real) * 2 * n);
    p->x1[i] = (real *)Sleef_malloc(sizeof(real) * 2 * n);
  }

  if ((mode & SLEEF_MODE_REAL) != 0) {
    /* Coefficients used by the REALSUB0/REALSUB1 pre/post-processing
       steps of the real transform; the sign of the 0.5*sc.x term
       differs between forward and backward. */
    p->rtCoef0 = (real *)Sleef_malloc(sizeof(real) * n);
    p->rtCoef1 = (real *)Sleef_malloc(sizeof(real) * n);

    if ((mode & SLEEF_MODE_BACKWARD) == 0) {
      for(uint32_t i=0;i<n/2;i++) {
        sc_t sc = SINCOSPI(i*((real)-1.0/n));
        ((real *)p->rtCoef0)[i*2+0] = ((real *)p->rtCoef0)[i*2+1] = (real)0.5 - (real)0.5 * sc.x;
        ((real *)p->rtCoef1)[i*2+0] = ((real *)p->rtCoef1)[i*2+1] = (real)0.5*sc.y;
      }
    } else {
      for(uint32_t i=0;i<n/2;i++) {
        sc_t sc = SINCOSPI(i*((real)-1.0/n));
        ((real *)p->rtCoef0)[i*2+0] = ((real *)p->rtCoef0)[i*2+1] = (real)0.5 + (real)0.5 * sc.x;
        ((real *)p->rtCoef1)[i*2+0] = ((real *)p->rtCoef1)[i*2+1] = (real)0.5*sc.y;
      }
    }
  }

  // Measure
  int sign = (mode & SLEEF_MODE_BACKWARD) != 0 ? -1 : 1;

  p->vecwidth = (*GETINT[p->isa])(GETINT_VECWIDTH);
  p->log2vecwidth = ilog2(p->vecwidth);

  for(int i=1;i<=MAXBUTWIDTH;i++) {
    ((real ***)p->tbl)[i] = makeTable(sign, p->vecwidth, p->log2len, i, constK[i]);
  }

  if (!measure(p, (mode & SLEEF_MODE_DEBUG))) {
    // Fall back to the first ISA
    freeTables(p);
    p->isa = 0;
    p->vecwidth = (*GETINT[p->isa])(GETINT_VECWIDTH);
    p->log2vecwidth = ilog2(p->vecwidth);

    for(int i=1;i<=MAXBUTWIDTH;i++) {
      ((real ***)p->tbl)[i] = makeTable(sign, p->vecwidth, p->log2len, i, constK[i]);
    }

    /* Rebuild the permutation tables for the fallback vector width. */
    for(int level = p->log2len;level >= 1;) {
      int N = ABS(p->bestPath[level]);
      if (level == N) { level -= N; continue; }

      int i1 = 0;
      for(int i0=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) {
        p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
      }
      for(;i1 < (1 << p->log2len) + 8;i1++) p->perm[level][i1] = 0;
      level -= N;
    }

    if (!measure(p, (mode & SLEEF_MODE_DEBUG))) {
      if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Suitable ISA not found. This should not happen.\n");
      return NULL;
    }
  }

  /* Permutation tables for the chosen best path. */
  for(int level = p->log2len;level >= 1;) {
    int N = ABS(p->bestPath[level]);
    if (level == N) { level -= N; continue; }

    int i1 = 0;
    for(int i0=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) {
      p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
    }
    for(;i1 < (1 << p->log2len) + 8;i1++) p->perm[level][i1] = 0;
    level -= N;
  }

  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("ISA : %s %d bit %s\n", (char *)(*GETPTR[p->isa])(0), (int)(GETINT[p->isa](GETINT_VECWIDTH) * sizeof(real) * 16), BASETYPESTRING);

  return p;
}

// Implementation of SleefDFT_*_init2d
/* Allocate and plan a 2D transform as row transforms + transpose +
   column transforms, built on two (possibly shared) 1D plans. */
EXPORT SleefDFT *INIT2D(uint32_t vlen, uint32_t hlen, const real *in, real *out, uint64_t mode) {
  SleefDFT *p = (SleefDFT *)calloc(1, sizeof(SleefDFT));
  p->magic = MAGIC2D;
  p->mode = mode;
  p->baseTypeID = BASETYPEID;
  p->in = in;
  p->out = out;
  p->hlen = hlen;
  p->log2hlen = ilog2(hlen);
  p->vlen = vlen;
  p->log2vlen = ilog2(vlen);

  /* The inner 1D transforms run single-threaded; 2D-level threading is
     handled separately via mode3. */
  uint64_t mode1D = mode;
  mode1D |=
SLEEF_MODE_NO_MT;
  if ((mode & SLEEF_MODE_NO_MT) == 0) p->mode3 |= SLEEF_MODE3_MT2D;

  /* Share one 1D plan when both dimensions are equal. */
  p->instH = p->instV = INIT(hlen, NULL, NULL, mode1D);
  if (hlen != vlen) p->instV = INIT(vlen, NULL, NULL, mode1D);

  p->tBuf = (void *)Sleef_malloc(sizeof(real)*2*hlen*vlen);

  measureTranspose(p);

  return p;
}

// Implementation of SleefDFT_*_execute
/* Execute a planned transform.  s0/d0 may be NULL, in which case the
   pointers supplied at init time are used. */
EXPORT void EXECUTE(SleefDFT *p, const real *s0, real *d0) {
  assert(p != NULL && (p->magic == MAGIC || p->magic == MAGIC2D));

  const real *s = s0 == NULL ? p->in : s0;
  real *d = d0 == NULL ? p->out : d0;

  if (p->magic == MAGIC2D) {
    // S -> T -> D -> T -> D
    real *tBuf = (real *)(p->tBuf);

#ifdef _OPENMP
    /* Multi-threaded 2D path.  In debug mode the MT/NoMT choice is
       randomized instead of using the measured transpose timings. */
    if ((p->mode3 & SLEEF_MODE3_MT2D) != 0 &&
        (((p->mode & SLEEF_MODE_DEBUG) == 0 && p->tmMT < p->tmNoMT) ||
         ((p->mode & SLEEF_MODE_DEBUG) != 0 && (rand() & 1)))) {
      int y;
#pragma omp parallel for
      for(y=0;y<p->vlen;y++) {
        EXECUTE(p->instH, &s[p->hlen*2*y], &tBuf[p->hlen*2*y]);
      }
      transposeMT(d, tBuf, p->log2vlen, p->log2hlen);
#pragma omp parallel for
      for(y=0;y<p->hlen;y++) {
        EXECUTE(p->instV, &d[p->vlen*2*y], &tBuf[p->vlen*2*y]);
      }
      transposeMT(d, tBuf, p->log2hlen, p->log2vlen);
    } else
#endif
    {
      /* Single-threaded 2D path: rows, transpose, columns, transpose. */
      for(int y=0;y<p->vlen;y++) {
        EXECUTE(p->instH, &s[p->hlen*2*y], &tBuf[p->hlen*2*y]);
      }
      transpose(d, tBuf, p->log2vlen, p->log2hlen);
      for(int y=0;y<p->hlen;y++) {
        EXECUTE(p->instV, &d[p->vlen*2*y], &tBuf[p->vlen*2*y]);
      }
      transpose(d, tBuf, p->log2hlen, p->log2vlen);
    }
    return;
  }

  /* Very short transforms (log2len <= 1) use hard-coded butterflies. */
  if (p->log2len <= 1) {
    if ((p->mode & SLEEF_MODE_REAL) == 0) {
      /* Complex length-2 transform. */
      real r0 = s[0] + s[2];
      real r1 = s[1] + s[3];
      real r2 = s[0] - s[2];
      real r3 = s[1] - s[3];
      d[0] = r0; d[1] = r1; d[2] = r2; d[3] = r3;
    } else {
      if ((p->mode & SLEEF_MODE_ALT) == 0) {
        /* Standard real layout. */
        if (p->log2len == 1) {
          if ((p->mode & SLEEF_MODE_BACKWARD) == 0) {
            real r0 = s[0] + s[2] + (s[1] + s[3]);
            real r1 = s[0] + s[2] - (s[1] + s[3]);
            real r2 = s[0] - s[2];
            real r3 = s[3] - s[1];
            d[0] = r0; d[1] = 0; d[2] = r2; d[3] = r3; d[4] = r1; d[5] = 0;
          } else {
            real r0 = (s[0] + s[4])*(real)0.5 + s[2];
            real r1 = (s[0] - s[4])*(real)0.5 - s[3];
            real r2 = (s[0] + s[4])*(real)0.5 - s[2];
            real r3 = (s[0] - s[4])*(real)0.5 + s[3];
            d[0] = r0*2; d[1] = r1*2; d[2] = r2*2; d[3] = r3*2;
          }
        } else {
          if ((p->mode & SLEEF_MODE_BACKWARD) == 0) {
            real r0 = s[0] + s[1];
            real r1 = s[0] - s[1];
            d[0] = r0; d[1] = 0; d[2] = r1; d[3] = 0;
          } else {
            real r0 = s[0] + s[2];
            real r1 = s[0] - s[2];
            d[0] = r0; d[1] = r1;
          }
        }
      } else {
        /* ALT real layout. */
        if (p->log2len == 1) {
          if ((p->mode & SLEEF_MODE_BACKWARD) == 0) {
            real r0 = s[0] + s[2] + (s[1] + s[3]);
            real r1 = s[0] + s[2] - (s[1] + s[3]);
            real r2 = s[0] - s[2];
            real r3 = s[1] - s[3];
            d[0] = r0; d[1] = r1; d[2] = r2; d[3] = r3;
          } else {
            real r0 = (s[0] + s[1])*(real)0.5 + s[2];
            real r1 = (s[0] - s[1])*(real)0.5 + s[3];
            real r2 = (s[0] + s[1])*(real)0.5 - s[2];
            real r3 = (s[0] - s[1])*(real)0.5 - s[3];
            d[0] = r0; d[1] = r1; d[2] = r2; d[3] = r3;
          }
        } else {
          real c = ((p->mode & SLEEF_MODE_BACKWARD) != 0) ? (real)0.5 : (real)1.0;
          real r0 = s[0] + s[1];
          real r1 = s[0] - s[1];
          d[0] = r0 * c; d[1] = r1 * c;
        }
      }
    }
    return;
  }

  //
#ifdef _OPENMP
  const int tn = omp_get_thread_num();
  /* Double-buffer set: two per-thread scratch buffers plus the output.
     NOTE(review): the nb parity below appears chosen so the last
     dispatch writes into d (t[2]) — inferred from the t[] layout,
     confirm against dispatch(). */
  real *t[] = { p->x1[tn], p->x0[tn], d };
#else
  real *t[] = { p->x1[0], p->x0[0], d };
#endif

  const real *lb = s;
  int nb = 0;

  /* Starting buffer parity depends on path length and on the real/
     backward/alt mode combination. */
  if ((p->mode & SLEEF_MODE_REAL) != 0 && (p->pathLen & 1) == 0 &&
      ((p->mode & SLEEF_MODE_BACKWARD) != 0) != ((p->mode & SLEEF_MODE_ALT) != 0)) nb = -1;
  if ((p->mode & SLEEF_MODE_REAL) == 0 && (p->pathLen & 1) == 1) nb = -1;

  /* Real-transform pre-processing (REALSUB1), applied when BACKWARD
     differs from ALT. */
  if ((p->mode & SLEEF_MODE_REAL) != 0 &&
      ((p->mode & SLEEF_MODE_BACKWARD) != 0) != ((p->mode & SLEEF_MODE_ALT) != 0)) {
    (*REALSUB1[p->isa])(t[nb+1], s, p->log2len, p->rtCoef0, p->rtCoef1, (p->mode & SLEEF_MODE_ALT) == 0);
    if ((p->mode & SLEEF_MODE_ALT) == 0) t[nb+1][(1 << p->log2len)+1] = -s[(1 << p->log2len)+1] * 2;
    lb = t[nb+1];
    nb = (nb + 1) & 1;
  }

  /* Walk the planned path: each step runs an N-wide butterfly stage
     with the configuration chosen by measure(). */
  for(int level = p->log2len;level >= 1;) {
    int N = ABS(p->bestPath[level]), config = p->bestPathConfig[level];
    dispatch(p, N, t[nb+1], lb, level, config);
    level -= N;
    lb = t[nb+1];
    nb = (nb + 1) & 1;
  }

  /* Real-transform post-processing (REALSUB0), applied when (not
     BACKWARD) differs from ALT. */
  if ((p->mode & SLEEF_MODE_REAL) != 0 &&
      ((p->mode & SLEEF_MODE_BACKWARD) == 0) != ((p->mode & SLEEF_MODE_ALT) != 0)) {
    (*REALSUB0[p->isa])(d, lb, p->log2len, p->rtCoef0, p->rtCoef1);
    if ((p->mode & SLEEF_MODE_ALT) == 0) {
      d[(1 << p->log2len)+1] = -d[(1 << p->log2len)+1];
      d[(2 << p->log2len)+0] = d[1];
      d[(2 << p->log2len)+1] = 0;
      d[1] = 0;
    }
  }
}
cpl_vector-test.c
/* * This file is part of the ESO Common Pipeline Library * Copyright (C) 2001-2017 European Southern Observatory * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include <config.h> #endif /*----------------------------------------------------------------------------- Includes -----------------------------------------------------------------------------*/ #include "cpl_io_fits.h" #include "cpl_test.h" #include "cpl_tools.h" #include "cpl_math_const.h" #include "cpl_memory.h" #include "cpl_plot.h" #include <float.h> #include <assert.h> #include <math.h> /* Must have three digits, for perl-test to work */ #define VECTOR_SIZE 256 #define VECTOR_CUT 24 /* alphaev56 SIGFPE's with more than 20 */ #define POLY_SIZE 20 /*----------------------------------------------------------------------------- Private function prototypes -----------------------------------------------------------------------------*/ static void cpl_vector_cycle_test(cpl_size); static void cpl_vector_save_bench(int); static void cpl_vector_get_stdev_bench(int); static void cpl_vector_corr_bench(int); static void cpl_vector_fit_gaussian_test_one(FILE *); static void cpl_vector_valarray_bench_one(cpl_vector *, cpl_vector *); static void cpl_vector_valarray_bench(cpl_size); 
/*----------------------------------------------------------------------------- Main -----------------------------------------------------------------------------*/ int main(void) { double xc; double emax = 0; /* Maximum observed xc-error */ double tmp; cpl_vector * null; cpl_vector * sinus; cpl_vector * cosinus; cpl_vector * tmp_vec; cpl_vector * taylor; cpl_vector * tmp_vec2; cpl_vector * vxc; cpl_vector * vxc1; cpl_vector * vxc3; double * data; const double five[] = {1,2,3,4,5}; const cpl_size vdif = VECTOR_SIZE - VECTOR_CUT > VECTOR_CUT ? VECTOR_CUT : VECTOR_SIZE - VECTOR_CUT; const cpl_size vdif2 = (VECTOR_SIZE - VECTOR_CUT)/2 > VECTOR_CUT/2 ? VECTOR_CUT/2 : (VECTOR_SIZE - VECTOR_CUT)/2; cpl_size delta; cpl_size half_search; cpl_size i,k; cpl_boolean do_bench; FILE * stream; FILE * f_out; char filename[1024]; cpl_boolean did_test_large = CPL_FALSE; cpl_error_code error; const int omp_num_threads = #ifdef _OPENMP /* Measure scaled speed-up */ getenv("OMP_NUM_THREADS") ? atoi(getenv("OMP_NUM_THREADS")) : #endif 1; const int npe = omp_num_threads > 1 ? omp_num_threads : 1; cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING); stream = cpl_msg_get_level() > CPL_MSG_INFO ? fopen("/dev/null", "a") : stdout; do_bench = cpl_msg_get_level() <= CPL_MSG_INFO ? 
CPL_TRUE : CPL_FALSE; /* Insert tests below */ cpl_test_nonnull( stream ); /* Test both odd and even numbered */ cpl_vector_cycle_test(13); cpl_vector_cycle_test(16); cpl_vector_cycle_test(VECTOR_SIZE); cpl_vector_cycle_test(VECTOR_SIZE + 1); cpl_vector_fit_gaussian_test_one(stream); null = cpl_vector_new(0); cpl_test_error(CPL_ERROR_ILLEGAL_INPUT); cpl_test_null(null); /* Verify cpl_vector_get_median() with even number of samples (DFS12089) */ /* Test on unsorted vector */ tmp_vec = cpl_vector_new(4); cpl_vector_set(tmp_vec, 0, 0.0); cpl_vector_set(tmp_vec, 1, 1.0); cpl_vector_set(tmp_vec, 2, 3.0); cpl_vector_set(tmp_vec, 3, 2.0); cpl_test_rel(1.5, cpl_vector_get_median(tmp_vec), DBL_EPSILON); cpl_test_rel(1.5, cpl_vector_get_median_const(tmp_vec), DBL_EPSILON); /* Test on sorted vector */ cpl_vector_set(tmp_vec, 0, 0.0); cpl_vector_set(tmp_vec, 1, 1.0); cpl_vector_set(tmp_vec, 2, 2.0); cpl_vector_set(tmp_vec, 3, 3.0); cpl_test_rel(1.5, cpl_vector_get_median(tmp_vec), DBL_EPSILON); cpl_test_rel(1.5, cpl_vector_get_median_const(tmp_vec), DBL_EPSILON); cpl_vector_delete(tmp_vec); /* Create the vector sinus */ cpl_test_nonnull( sinus = cpl_vector_new(VECTOR_SIZE) ); /* Test cpl_vector_get_size() */ cpl_test_eq( cpl_vector_get_size(sinus), VECTOR_SIZE ); /* Fill the vector sinus */ /* Test cpl_vector_get_data(), cpl_vector_set(), cpl_vector_get() */ data = cpl_vector_get_data(sinus); cpl_test_nonnull( data ); for (i=0; i < VECTOR_SIZE; i++) { const double value = sin(i*CPL_MATH_2PI/VECTOR_SIZE); cpl_test_zero( cpl_vector_set(sinus, i, value) ); cpl_test_eq( cpl_vector_get(sinus, i), data[i] ); } /* Create a Taylor-expansion of exp() */ cpl_test_nonnull( taylor = cpl_vector_new(POLY_SIZE) ); i = 0; cpl_vector_set(taylor, i, 1); for (i=1; i<POLY_SIZE; i++) cpl_vector_set(taylor, i, cpl_vector_get(taylor, i-1)/i); /* Evaluate exp(sinus) using Horners scheme on the Taylor expansion */ tmp_vec2 = cpl_vector_new(VECTOR_SIZE); cpl_test_nonnull(tmp_vec2); error = 
cpl_vector_fill(tmp_vec2, cpl_vector_get(taylor, POLY_SIZE-1)); cpl_test_eq_error(error, CPL_ERROR_NONE); for (k=POLY_SIZE-1; k > 0; k--) { cpl_test_zero( cpl_vector_multiply(tmp_vec2, sinus) ); if (k&1) { cpl_test_zero( cpl_vector_add_scalar(tmp_vec2, cpl_vector_get(taylor, k-1)) ); } else { cpl_test_zero( cpl_vector_subtract_scalar(tmp_vec2, -cpl_vector_get(taylor, k-1)) ); } } /* Verify the result (against cpl_vector_exponential() ) */ cpl_test( tmp_vec = cpl_vector_duplicate(sinus) ); cpl_test_zero( cpl_vector_exponential(tmp_vec, CPL_MATH_E) ); cpl_test_zero( cpl_vector_subtract(tmp_vec2, tmp_vec) ); cpl_test_zero( cpl_vector_divide(tmp_vec2, tmp_vec) ); cpl_test_zero( cpl_vector_divide_scalar(tmp_vec2, DBL_EPSILON) ); cpl_test_leq( fabs(cpl_vector_get_max(tmp_vec2)), 2.60831 ); cpl_test_leq( fabs(cpl_vector_get_min(tmp_vec2)), 2.03626 ); /* Evaluate exp() using cpl_vector_pow() on the Taylor expansion */ cpl_test_zero( cpl_vector_fill(tmp_vec2, cpl_vector_get(taylor, 0)) ); /* POLY_SIZE > 20 on alphaev56: Program received signal SIGFPE, Arithmetic exception. 0x200000a3ff0 in cpl_vector_multiply_scalar () */ for (k=1; k < POLY_SIZE; k++) { cpl_vector * vtmp = cpl_vector_duplicate(sinus); cpl_test_zero( cpl_vector_power(vtmp, k) ); cpl_test_zero( cpl_vector_multiply_scalar(vtmp, cpl_vector_get(taylor, k)) ); cpl_test_zero( cpl_vector_add(tmp_vec2, vtmp) ); cpl_vector_delete(vtmp); } /* Much less precise than Horner ... 
*/ cpl_test_vector_abs(tmp_vec, tmp_vec2, 8.0 * DBL_EPSILON); /* cpl_vector_fill(): Test with NULL and zero value */ error = cpl_vector_fill(NULL, 0.0); cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT); error = cpl_vector_fill(taylor, 0.0); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_abs(cpl_vector_get_max(taylor), 0.0, 0.0); cpl_test_abs(cpl_vector_get_min(taylor), 0.0, 0.0); cpl_vector_delete(taylor); /* Verify cpl_vector_logarithm() ) */ cpl_test_zero( cpl_vector_logarithm(tmp_vec, CPL_MATH_E) ); for (i=0; i < VECTOR_SIZE; i++) { const double value = cpl_vector_get(sinus, i); double lerror = value - cpl_vector_get(tmp_vec, i); if (2*i == VECTOR_SIZE) { /* value should really be zero */ cpl_test_abs( value, 0.0, 0.552 * DBL_EPSILON ); } else { if (value != 0) lerror /= value; cpl_test_abs( lerror, 0.0, 330 * DBL_EPSILON ); } } /* Verify cpl_vector_power() */ cpl_test_zero( cpl_vector_copy(tmp_vec, sinus) ); /* Just be positive */ cpl_test_zero( cpl_vector_exponential(tmp_vec, CPL_MATH_E) ); cpl_test_zero( cpl_vector_copy(tmp_vec2, tmp_vec) ); cpl_test_zero( cpl_vector_sqrt(tmp_vec2) ); cpl_test_zero( cpl_vector_power(tmp_vec, 0.5) ); /* Necessary on AMD 64 (x86_64) Linux */ cpl_test_vector_abs(tmp_vec, tmp_vec2, 1.1 * DBL_EPSILON); cpl_test_zero( cpl_vector_copy(tmp_vec, sinus) ); cpl_test_zero( cpl_vector_exponential(tmp_vec, CPL_MATH_E) ); cpl_test_zero( cpl_vector_multiply(tmp_vec2, tmp_vec) ); cpl_test_zero( cpl_vector_power(tmp_vec, 1.5) ); cpl_test_vector_abs(tmp_vec, tmp_vec2, 8.0 * DBL_EPSILON); cpl_test_zero( cpl_vector_copy(tmp_vec2, tmp_vec) ); cpl_test_zero( cpl_vector_power(tmp_vec, 2) ); cpl_test_zero( cpl_vector_divide(tmp_vec2, tmp_vec) ); cpl_test_zero( cpl_vector_power(tmp_vec, -0.5) ); cpl_test_vector_abs(tmp_vec, tmp_vec2, 8.0 * DBL_EPSILON); cpl_test_zero( cpl_vector_fill(tmp_vec, 1) ); cpl_test_zero( cpl_vector_power(tmp_vec2, 0) ); cpl_test_vector_abs(tmp_vec, tmp_vec2, 0.0); cpl_vector_delete(tmp_vec2); /* Test 0^0 */ 
cpl_test_nonnull( tmp_vec2 = cpl_vector_new(VECTOR_SIZE) ); for (i = 0; i < VECTOR_SIZE; i++) cpl_test_zero( cpl_vector_set(tmp_vec2, i, 0.0) ); cpl_test_zero( cpl_vector_power(tmp_vec2, 0.0) ); cpl_test_vector_abs(tmp_vec, tmp_vec2, 0.0); cpl_vector_delete(tmp_vec); cpl_vector_delete(tmp_vec2); /* Test cpl_vector_dump() */ cpl_vector_dump(NULL, stream); cpl_vector_dump(sinus, stream); /* Test failures on cpl_vector_read() */ tmp_vec = cpl_vector_read(NULL); cpl_test_error(CPL_ERROR_NULL_INPUT); cpl_test_null( tmp_vec ); tmp_vec = cpl_vector_read("/nonexisting"); cpl_test_error(CPL_ERROR_FILE_IO); cpl_test_null( tmp_vec ); tmp_vec = cpl_vector_read("/dev/null"); cpl_test_error(CPL_ERROR_BAD_FILE_FORMAT); cpl_test_null( tmp_vec ); /* Test correct case on cpl_vector_read() */ sprintf(filename, "cpl_vector_dump.txt"); cpl_test_nonnull( filename ); cpl_test_nonnull( f_out = fopen(filename, "w") ); cpl_vector_dump(sinus, f_out); fclose(f_out); tmp_vec = cpl_vector_read("cpl_vector_dump.txt"); cpl_test_zero( remove("cpl_vector_dump.txt") ); cpl_test_nonnull( tmp_vec ); cpl_test_eq( cpl_vector_get_size(tmp_vec), cpl_vector_get_size(sinus) ); /* Test cpl_vector_save() / cpl_vector_load() */ error = cpl_vector_save(tmp_vec, "cpl_vector_save.fits", CPL_TYPE_DOUBLE, NULL, CPL_IO_CREATE | CPL_IO_EXTEND); cpl_test_eq_error( error, CPL_ERROR_ILLEGAL_INPUT ); error = cpl_vector_save(tmp_vec, "cpl_vector_save.fits", CPL_TYPE_DOUBLE, NULL, CPL_IO_APPEND); cpl_test_eq_error( error, CPL_ERROR_ILLEGAL_INPUT ); error = cpl_vector_save(tmp_vec, "cpl_vector_save.fits", CPL_TYPE_DOUBLE, NULL, CPL_IO_CREATE | CPL_IO_APPEND); cpl_test_eq_error( error, CPL_ERROR_ILLEGAL_INPUT ); error = cpl_vector_save(tmp_vec, "cpl_vector_save.fits", CPL_TYPE_DOUBLE, NULL, CPL_IO_CREATE); cpl_test_eq_error( error, CPL_ERROR_NONE ); cpl_test_fits("cpl_vector_save.fits"); tmp_vec2 = cpl_vector_load("cpl_vector_save.fits", 0); cpl_test_error( CPL_ERROR_NONE ); cpl_test_zero( remove("cpl_vector_save.fits") ); 
cpl_test_nonnull( tmp_vec2 ); /* Verify that the save/load did not change the vector */ cpl_test_vector_abs(tmp_vec, tmp_vec2, 0.0); cpl_vector_delete(tmp_vec2); /* Repeat test cpl_vector_save() / cpl_vector_load() on APPEND mode*/ error = cpl_vector_save(tmp_vec, "cpl_vector_save.fits", CPL_TYPE_DOUBLE, NULL, CPL_IO_CREATE); cpl_test_eq_error( error, CPL_ERROR_NONE ); cpl_test_fits("cpl_vector_save.fits"); error = cpl_vector_save(tmp_vec, "cpl_vector_save.fits", CPL_TYPE_DOUBLE, NULL, CPL_IO_EXTEND); cpl_test_eq_error( error, CPL_ERROR_NONE ); cpl_test_fits("cpl_vector_save.fits"); /* Verify that the save/load did not change the vector on 0. ext. */ tmp_vec2 = cpl_vector_load("cpl_vector_save.fits", 0); cpl_test_nonnull( tmp_vec2 ); cpl_test_vector_abs(tmp_vec, tmp_vec2, 0.0); cpl_vector_delete(tmp_vec2); /* Verify that the save/load did not change the vector on 1. ext. */ tmp_vec2 = cpl_vector_load("cpl_vector_save.fits", 1); cpl_test_nonnull( tmp_vec2 ); cpl_test_vector_abs(tmp_vec, tmp_vec2, 0.0); cpl_vector_delete(tmp_vec2); if (!cpl_io_fits_is_enabled()) { /* Decrease the number of elements by one, thus verifying that an external application may modify the file */ if (system("perl -pi -e 'BEGIN{sleep(1)};/NAXIS1/ and s/" CPL_STRINGIFY(VECTOR_SIZE) "/sprintf(\"%d\"," CPL_STRINGIFY(VECTOR_SIZE-1) ")/e' cpl_vector_save.fits") == 0) { tmp_vec2 = cpl_vector_load("cpl_vector_save.fits", 0); cpl_test_error(CPL_ERROR_NONE); cpl_test_nonnull( tmp_vec2 ); cpl_test_eq(cpl_vector_get_size(tmp_vec2), VECTOR_SIZE-1); cpl_vector_delete(tmp_vec2); } } if (sizeof(cpl_size) == 4) { #if !defined CPL_SIZE_BITS || CPL_SIZE_BITS != 32 if (!cpl_io_fits_is_enabled()) { /* Cannot load a vector longer than 2**31 - 1 */ /* Increase the number of elements to more than 2**31 */ if (system("perl -pi -e '/NAXIS1/ and s/ " CPL_STRINGIFY(VECTOR_SIZE) "/2200000000/' cpl_vector_save.fits") == 0) { tmp_vec2 = cpl_vector_load("cpl_vector_save.fits", 0); 
cpl_test_error(CPL_ERROR_UNSUPPORTED_MODE); cpl_test_null( tmp_vec2 ); if (tmp_vec2 != NULL) { /* The original size is VECTOR_SIZE */ cpl_test_noneq(cpl_vector_get_size(tmp_vec2), VECTOR_SIZE); cpl_vector_delete(tmp_vec2); cpl_test_assert(0); } did_test_large = CPL_TRUE; } } #endif #ifdef CPL_TEST_LARGE } else if (sizeof(cpl_size) == 8) { cpl_vector * long_vec = cpl_vector_new(2200000000L); error = cpl_vector_save(long_vec, "cpl_vector_save.fits", CPL_TYPE_DOUBLE, NULL, CPL_IO_CREATE); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_fits("cpl_vector_save.fits"); cpl_vector_delete(long_vec); long_vec = cpl_vector_load("cpl_vector_save.fits", 0); cpl_test_error(CPL_ERROR_NONE); cpl_test_nonnull(long_vec); cpl_vector_delete(long_vec); did_test_large = CPL_TRUE; #endif } if (!did_test_large) { cpl_msg_info(cpl_func, "I/O-testing of large vectors inactive"); } cpl_test_zero( remove("cpl_vector_save.fits") ); /* Loss of precision in cpl_vector_dump() */ cpl_test_vector_abs(tmp_vec, sinus, 10.0 * FLT_EPSILON); cpl_vector_subtract(tmp_vec, sinus); /* Same loss for positive as for negative numbers */ cpl_test_abs( cpl_vector_get_max(tmp_vec)+cpl_vector_get_min(tmp_vec), 0.0, 2.5 * DBL_EPSILON); cpl_vector_delete(tmp_vec); /* Test cpl_vector_duplicate */ tmp_vec = cpl_vector_duplicate(sinus); cpl_test_vector_abs(tmp_vec, sinus, 0.0); /* Test fill function */ cpl_test_eq_error( cpl_vector_fill(tmp_vec, 1.0), CPL_ERROR_NONE ); cpl_test_abs( cpl_vector_get_mean(tmp_vec), 1.0, DBL_EPSILON ); cpl_test_abs( cpl_vector_get_sum(tmp_vec), (double)(VECTOR_SIZE), DBL_EPSILON * sqrt((double)(VECTOR_SIZE))); (void)cpl_vector_get_sum(NULL); cpl_test_error(CPL_ERROR_NULL_INPUT); /* Test extract function */ tmp_vec2 = cpl_vector_extract(tmp_vec, 0, VECTOR_SIZE/2, 1); cpl_test_nonnull( tmp_vec2 ); cpl_vector_delete(tmp_vec2); null = cpl_vector_extract(NULL, 0, 1, 1); cpl_test_error(CPL_ERROR_NULL_INPUT); cpl_test_null( null ); null = cpl_vector_extract(tmp_vec, 2, 1, 1); 
cpl_test_error(CPL_ERROR_ILLEGAL_INPUT); cpl_test_null( null ); null = cpl_vector_extract(tmp_vec, 1, 2, 2); cpl_test_error(CPL_ERROR_ILLEGAL_INPUT); cpl_test_null( null ); null = cpl_vector_extract(tmp_vec, -1, 2, 1); cpl_test_error(CPL_ERROR_ACCESS_OUT_OF_RANGE); cpl_test_null( null ); null = cpl_vector_extract(tmp_vec, 0, VECTOR_SIZE + 2, 1); cpl_test_error(CPL_ERROR_ACCESS_OUT_OF_RANGE); cpl_test_null( null ); CPL_DIAG_PRAGMA_PUSH_IGN(-Wcast-qual); vxc = cpl_vector_wrap(5, (double*)five); CPL_DIAG_PRAGMA_POP; vxc1 = cpl_vector_extract(vxc, 1, 4, 1); cpl_test_error(CPL_ERROR_NONE); cpl_test_eq_ptr(five, cpl_vector_unwrap(vxc)); CPL_DIAG_PRAGMA_PUSH_IGN(-Wcast-qual); vxc = cpl_vector_wrap(4, (double*)five + 1); CPL_DIAG_PRAGMA_POP; cpl_test_vector_abs(vxc, vxc1, 0.0); (void)cpl_vector_unwrap(vxc); cpl_vector_delete(vxc1); /* Create the vector cosinus */ cosinus = cpl_vector_new(VECTOR_SIZE); cpl_test_eq( cpl_vector_get_size(sinus), VECTOR_SIZE ); /* Fill the vector cosinus */ data = cpl_vector_get_data(cosinus); cpl_test_nonnull( data ); for (i=0; i<VECTOR_SIZE; i++) data[i] = cos(i*CPL_MATH_2PI/VECTOR_SIZE); /* Test mean function */ cpl_test_abs( cpl_vector_get_mean(cosinus), 0.0, 1.68*DBL_EPSILON ); /* Test stdev function (NB: the mean-value of cosinus-squared is 1/2) */ (void)cpl_vector_get_stdev(NULL); cpl_test_error(CPL_ERROR_NULL_INPUT); cpl_test_abs( cpl_vector_get_stdev(cosinus), sqrt(VECTOR_SIZE*0.5/(VECTOR_SIZE - 1)), 0.36 * DBL_EPSILON * sqrt(VECTOR_SIZE)); /* Test copy function */ cpl_test_eq_error( cpl_vector_copy(tmp_vec, cosinus), CPL_ERROR_NONE ); cpl_test_vector_abs(tmp_vec, cosinus, 0.0); cpl_vector_delete(tmp_vec); /* Test add & sub functions */ tmp_vec = cpl_vector_duplicate(sinus); cpl_test_vector_abs(tmp_vec, sinus, 0.0); cpl_vector_add(tmp_vec, cosinus); cpl_vector_subtract(tmp_vec, sinus); cpl_test_vector_abs(tmp_vec, cosinus, DBL_EPSILON); /* Test cpl_vector_subtract_scalar() function */ cpl_test_eq_error( 
cpl_vector_subtract_scalar(tmp_vec, 2), CPL_ERROR_NONE ); /* Test div function */ cpl_test_eq_error( cpl_vector_divide(tmp_vec, tmp_vec), CPL_ERROR_NONE ); cpl_test_leq( cpl_vector_get_mean(tmp_vec) - 1, DBL_EPSILON ); cpl_vector_delete(tmp_vec); /* Test dot-product - using orthogonal vectors and pythagoras */ cpl_test_leq( cpl_vector_product(sinus, cosinus), DBL_EPSILON*VECTOR_SIZE); cpl_test_abs( cpl_vector_product( sinus, sinus) + cpl_vector_product(cosinus, cosinus), VECTOR_SIZE, DBL_EPSILON*VECTOR_SIZE ); /* Test filtering */ tmp_vec = cpl_vector_filter_lowpass_create(sinus, CPL_LOWPASS_LINEAR, 2); cpl_test_eq( cpl_vector_get_size(tmp_vec), cpl_vector_get_size(sinus) ); cpl_vector_delete(tmp_vec); tmp_vec = cpl_vector_filter_median_create(sinus, 2); cpl_test_error(CPL_ERROR_NONE); cpl_test_eq( cpl_vector_get_size(tmp_vec), VECTOR_SIZE ); cpl_vector_delete(tmp_vec); tmp_vec = cpl_vector_filter_median_create(sinus, VECTOR_SIZE/2); cpl_test_error(CPL_ERROR_NONE); cpl_test_eq( cpl_vector_get_size(tmp_vec), VECTOR_SIZE ); if (2 * (VECTOR_SIZE/2) == cpl_vector_get_size(tmp_vec)) cpl_test_vector_abs(tmp_vec, sinus, 0.0); cpl_vector_delete(tmp_vec); null = cpl_vector_filter_median_create(sinus, -1); cpl_test_error(CPL_ERROR_ILLEGAL_INPUT); cpl_test_null(null); null = cpl_vector_filter_median_create(sinus, 1 + VECTOR_SIZE/2); cpl_test_error(CPL_ERROR_ILLEGAL_INPUT); cpl_test_null(null); null = cpl_vector_filter_median_create(NULL, 0); cpl_test_error(CPL_ERROR_NULL_INPUT); cpl_test_null(null); /* Test existence of cpl_vector_fit_gaussian() */ error = cpl_vector_fit_gaussian(NULL, NULL, NULL, NULL, CPL_FIT_ALL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT ); /* sinus <- sinus*sinus */ cpl_test_eq_error( cpl_vector_multiply(sinus, sinus), CPL_ERROR_NONE ); /* Multiply by -1 */ cpl_test_eq_error( cpl_vector_multiply_scalar(sinus, -1), CPL_ERROR_NONE ); /* Add 1 */ cpl_test_eq_error( cpl_vector_add_scalar(sinus, 1), CPL_ERROR_NONE 
); /* sinus <- sqrt(1-sinus^2) */ cpl_test_eq_error( cpl_vector_sqrt(sinus), CPL_ERROR_NONE ); /* Compute the absolute value of cosinus */ data = cpl_vector_get_data(cosinus); cpl_test_nonnull( data ); for (i=0; i<VECTOR_SIZE; i++) data[i] = fabs(data[i]); /* Compare fabs(cosinus) with sqrt(1-sinus^2) */ cpl_test_vector_abs(sinus, cosinus, 10.0 * DBL_EPSILON); cpl_test_zero(cpl_vector_copy(sinus, cosinus)); cpl_test_zero(cpl_vector_sort(cosinus, CPL_SORT_ASCENDING)); for (i=1; i<VECTOR_SIZE; i++) cpl_test_leq( data[i-1], data[i]); cpl_test_zero(cpl_vector_sort(sinus, CPL_SORT_DESCENDING)); data = cpl_vector_get_data(sinus); cpl_test_nonnull( data ); for (i=1; i<VECTOR_SIZE; i++) cpl_test_leq( data[i], data[i-1] ); cpl_test_abs( cpl_vector_get_mean(cosinus), cpl_vector_get_mean(sinus), 15.5*DBL_EPSILON ); /* Create a 1-element array */ tmp = 0.0; tmp_vec = cpl_vector_wrap(1, &tmp); cpl_test_nonnull( tmp_vec ); error = cpl_vector_sort(tmp_vec, CPL_SORT_ASCENDING); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_abs(tmp, 0.0, 0.0); error = cpl_vector_sort(tmp_vec, CPL_SORT_DESCENDING); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_test_abs(tmp, 0.0, 0.0); cpl_test_eq_ptr(cpl_vector_unwrap(tmp_vec), &tmp); error = cpl_vector_sort(sinus, 2); cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT); error = cpl_vector_sort(NULL, CPL_SORT_ASCENDING); cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT); cpl_vector_set_size(sinus, 1); cpl_test_zero(cpl_vector_set(sinus, 0, 0.0)); cpl_test_zero(cpl_vector_sort(sinus, CPL_SORT_DESCENDING)); cpl_test_abs(cpl_vector_get(sinus, 0), 0.0, 0.0); cpl_test_zero(cpl_vector_sort(sinus, CPL_SORT_ASCENDING)); cpl_test_abs(cpl_vector_get(sinus, 0), 0.0, 0.0); cpl_vector_delete(cosinus); cpl_vector_delete(sinus); /* Create the double-length vector sinus */ cpl_test_nonnull( sinus = cpl_vector_new(2*VECTOR_SIZE) ); /* Fill the vector sinus */ data = cpl_vector_get_data(sinus); cpl_test_nonnull( data ); for (i=0; i<2*VECTOR_SIZE; i++) data[i] = 
sin(i*CPL_MATH_2PI/VECTOR_SIZE); /* Create the vector cosinus */ cpl_test_nonnull( cosinus = cpl_vector_new(VECTOR_SIZE) ); /* Fill the vector cosinus */ data = cpl_vector_get_data(cosinus); cpl_test_nonnull( data ); for (i=0; i<VECTOR_SIZE; i++) data[i] = cos(i*CPL_MATH_2PI/VECTOR_SIZE); /* Create the vector tmp_vec */ tmp_vec = cpl_vector_new(VECTOR_SIZE-1); cpl_test_nonnull( tmp_vec ); cpl_test_eq_error( cpl_vector_fill(tmp_vec, 1.0), CPL_ERROR_NONE ); vxc1 = cpl_vector_new(1); vxc3 = cpl_vector_new(3); /* Various error conditions */ delta = cpl_vector_correlate(NULL, sinus, sinus); cpl_test_error( CPL_ERROR_NULL_INPUT ); cpl_test( delta < 0 ); delta = cpl_vector_correlate(vxc1, NULL, sinus); cpl_test_error( CPL_ERROR_NULL_INPUT ); cpl_test( delta < 0 ); delta = cpl_vector_correlate(vxc1, sinus, NULL); cpl_test_error( CPL_ERROR_NULL_INPUT ); cpl_test( delta < 0 ); delta = cpl_vector_correlate(cosinus, sinus, sinus); cpl_test_error( CPL_ERROR_ILLEGAL_INPUT ); cpl_test( delta < 0 ); delta = cpl_vector_correlate(vxc1, cosinus, sinus); cpl_test_error( CPL_ERROR_ILLEGAL_INPUT ); cpl_test( delta < 0 ); delta = cpl_vector_correlate(vxc3, cosinus, tmp_vec); cpl_test_error( CPL_ERROR_ILLEGAL_INPUT ); cpl_test( delta < 0 ); cpl_test_zero( cpl_vector_correlate(vxc1, cosinus, tmp_vec)); cpl_test_zero( cpl_vector_get(vxc1, 0) ); cpl_vector_delete(tmp_vec); cpl_test_zero( cpl_vector_multiply_scalar(sinus, CPL_MATH_SQRT2) ); cpl_test_zero( cpl_vector_add_scalar( sinus, CPL_MATH_PI ) ); cpl_test_zero( cpl_vector_multiply_scalar(cosinus, CPL_MATH_SQRT2) ); cpl_test_zero( cpl_vector_correlate(vxc1, sinus, sinus)); /* without -O3 a zero-tolereance would be OK */ cpl_test_leq( 1.0 - cpl_vector_get(vxc1, 0), 144.0*DBL_EPSILON ); cpl_test_zero( cpl_vector_correlate(vxc1, cosinus, cosinus) ); xc = cpl_vector_get(vxc1, 0); cpl_test_abs( xc, 1.0, 5.0 * DBL_EPSILON ); if (fabs(1-xc) > emax) emax = fabs(1-xc); if (VECTOR_SIZE % 2 == 0) { /* Sinus and cosinus have zero cross-correlation 
with zero shift */ cpl_test_zero( cpl_vector_correlate(vxc1, sinus, cosinus) ); xc = cpl_vector_get(vxc1, 0); cpl_test_leq( fabs(xc), 2.82*DBL_EPSILON ); if (fabs(xc) > emax) emax = fabs(xc); } /* cosinus and -cosinus have cross-correlation -1 with zero shift */ tmp_vec = cpl_vector_duplicate(cosinus); cpl_test_vector_abs(tmp_vec, cosinus, 0.0); cpl_test_zero( cpl_vector_divide_scalar(tmp_vec, -1) ); cpl_test_zero( cpl_vector_correlate(vxc1, tmp_vec, cosinus) ); xc = cpl_vector_get(vxc1, 0); cpl_vector_delete(tmp_vec); cpl_test_abs( xc, -1.0, 5.0 * DBL_EPSILON ); if (fabs(1+xc) > emax) emax = fabs(1+xc); vxc = cpl_vector_new( 1 ); if (VECTOR_SIZE % 2 == 0) { /* Cross-correlation between sinus and cosinus grows to maximum at shift of pi/2 */ for (i=0; i<VECTOR_SIZE/4; i++) { const double xcp = xc; half_search = i+1; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); delta = cpl_vector_correlate(vxc, sinus, cosinus); xc = cpl_vector_get(vxc, delta); cpl_test( xc > xcp ); cpl_test_eq( llabs(delta-(i+1)), i+1 ); } cpl_test_abs( xc, 1.0, 260*DBL_EPSILON); half_search = VECTOR_SIZE/3; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); delta = cpl_vector_correlate(vxc, sinus, cosinus); xc = cpl_vector_get(vxc, delta ); cpl_test_eq( llabs(delta-VECTOR_SIZE/3), VECTOR_SIZE/4 ); if (fabs(1-xc) > emax) emax = fabs(1-xc); } cpl_vector_delete(sinus); /* Vectors of almost the same length - no commutativity */ /* Create the vector sinus */ cpl_test_nonnull( sinus = cpl_vector_new(VECTOR_SIZE-VECTOR_CUT) ); /* Fill the vector sinus */ data = cpl_vector_get_data(sinus); cpl_test_nonnull( data ); for (i=0; i<cpl_vector_get_size(sinus); i++) data[i] = cos(i*CPL_MATH_2PI/VECTOR_SIZE); /* Compare with no shift - other than half the length difference */ half_search = VECTOR_SIZE; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); delta = cpl_vector_correlate(vxc, cosinus, sinus); xc = cpl_vector_get(vxc, delta); delta -= VECTOR_SIZE; cpl_test_zero( delta 
+ VECTOR_CUT/2); cpl_test_abs( xc, 1.0, 16.5 * DBL_EPSILON ); if (fabs(1-xc) > emax) emax = fabs(1-xc); /* Compare with increasing shift and increasing drop of elements - only up to the length-difference */ for (k = 1; k < vdif; k++) { for (i=0; i<cpl_vector_get_size(sinus); i++) data[i] = cos((i+k)*CPL_MATH_2PI/VECTOR_SIZE); delta = cpl_vector_correlate(vxc, cosinus, sinus); xc = cpl_vector_get(vxc, delta); delta -= VECTOR_SIZE; cpl_test_eq( delta + VECTOR_CUT/2, k ); cpl_test_abs( xc, 1.0, 18.5 * DBL_EPSILON ); if (fabs(1-xc) > emax) emax = fabs(1-xc); } /* Continue - maximum xc found with drop */ for (; k < vdif; k++) { half_search = k-VECTOR_CUT/2; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); for (i=0; i<cpl_vector_get_size(sinus); i++) data[i] = cos((i+k)*CPL_MATH_2PI/VECTOR_SIZE); delta = cpl_vector_correlate(vxc, cosinus, sinus); xc = cpl_vector_get(vxc, delta); delta -= half_search; cpl_test_abs( xc, 1.0, 25.0 * DBL_EPSILON ); cpl_test_eq( delta + VECTOR_CUT/2, k ); if (fabs(1-xc) > emax) emax = fabs(1-xc); } /* Compare with increasing negative shift and increasing drop of elements - only up to half the length-difference */ half_search = VECTOR_CUT; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); xc = 1; for (k = 1; k < vdif2; k++) { const double xcp = xc; for (i=0; i<cpl_vector_get_size(sinus); i++) data[i] = cos((i-k)*CPL_MATH_2PI/VECTOR_SIZE); delta = cpl_vector_correlate(vxc, cosinus, sinus); xc = cpl_vector_get(vxc, delta); delta -= half_search; cpl_test_leq( xc, xcp ); cpl_test_leq( 0.0, delta + k + VECTOR_CUT/2 ); } cpl_vector_delete(sinus); /* Vectors of the same length - commutativity */ sinus = cpl_vector_duplicate(cosinus); cpl_test_vector_abs(sinus, cosinus, 0.0); half_search = VECTOR_CUT; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); delta = cpl_vector_correlate(vxc, sinus, cosinus); xc = cpl_vector_get(vxc, delta); delta -= half_search; cpl_test_zero( delta ); cpl_test_abs( xc, 1.0, 5.0 * 
DBL_EPSILON ); if (fabs(1-xc) > emax) emax = fabs(1-xc); /* Verify commutativity */ cpl_test_eq( delta+half_search, cpl_vector_correlate(vxc, cosinus, sinus) ); cpl_test_eq( xc, cpl_vector_get(vxc, delta+half_search) ); data = cpl_vector_get_data(sinus); cpl_test_nonnull( data ); half_search = VECTOR_SIZE/2; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); /* Compare with increasing shift and increasing drop of elements - delta tests will not hold for large shifts */ xc = 1; for (k = 1; k < VECTOR_SIZE/50; k+=7) { const double xcp = xc; double xcn; for (i=0; i<VECTOR_SIZE; i++) data[i] = cos((i+k)*CPL_MATH_2PI/VECTOR_SIZE); delta = cpl_vector_correlate(vxc, cosinus, sinus); xc = cpl_vector_get(vxc, delta); delta -= half_search; cpl_test_eq( k, delta ); cpl_test( xc < xcp ); /* Commutativity */ delta = cpl_vector_correlate(vxc, sinus, cosinus); xcn = cpl_vector_get(vxc, delta); delta -= half_search; cpl_test_eq( k, -delta); cpl_test_abs( xcn, xc, 7.0 * DBL_EPSILON); /* SUSE 9.0 */ /* Shift in opposite direction, i.e. 
reverse sign on k */ for (i=0; i<VECTOR_SIZE; i++) data[i] = cos((i-k)*CPL_MATH_2PI/VECTOR_SIZE); delta = cpl_vector_correlate(vxc, cosinus, sinus); xc = cpl_vector_get(vxc, delta); delta -= half_search; cpl_test_zero( k + delta); cpl_test( xc < xcp ); } half_search = VECTOR_SIZE; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); /* Check with pseudo-random data */ srand(1); for (i=0; i<VECTOR_SIZE; i++) data[i] = 2.0*cpl_drand() - 1.0; cpl_vector_copy(cosinus, sinus); cpl_test_eq( cpl_vector_correlate(vxc, cosinus, sinus), half_search ); /* without -O3 a zero-tolereance would be OK */ cpl_test_leq( 1.0 - cpl_vector_get(vxc, half_search), 3.5*DBL_EPSILON ); half_search = VECTOR_SIZE/2; cpl_test_zero( cpl_vector_set_size(vxc, 2*half_search + 1) ); for (k = 2; k < VECTOR_SIZE-2; k+=2) { double * pcosinus; cpl_vector_delete(cosinus); cosinus = cpl_vector_new(VECTOR_SIZE-k); pcosinus = cpl_vector_get_data(cosinus); cpl_test_nonnull( pcosinus ); for (i=0; i<VECTOR_SIZE-k; i++) pcosinus[i] = data[i]; delta = cpl_vector_correlate(vxc, sinus, cosinus); xc = cpl_vector_get(vxc, delta); delta -= half_search; cpl_test_leq( delta, 0.0 ); cpl_test_abs( xc, 1.0, 23.5 * DBL_EPSILON ); if (fabs(1-xc) > emax) emax = fabs(1-xc); } cpl_msg_info("","Largest cross-correlation rounding error [DBL_EPSILON]: " "%g", emax/DBL_EPSILON); if (do_bench) { cpl_vector_corr_bench(4 * npe); cpl_vector_get_stdev_bench(64 * npe); /* cpl_msg_set_component_on(); */ cpl_vector_save_bench(200); /* cpl_msg_set_component_off(); */ } else { cpl_vector_corr_bench(1); cpl_vector_get_stdev_bench(1); cpl_vector_save_bench(1); } if (do_bench) { cpl_vector_valarray_bench(10); cpl_vector_valarray_bench(50000); cpl_vector_valarray_bench(500000); cpl_vector_valarray_bench(5000000); } else { cpl_vector_valarray_bench(10); } if (getenv("CPL_VALARRAY_SIZE")) { const int nvalarray = atoi(getenv("CPL_VALARRAY_SIZE")); cpl_vector_valarray_bench(nvalarray); } /* Free and return */ cpl_vector_delete(cosinus); 
cpl_vector_delete(sinus); cpl_vector_delete(vxc); cpl_vector_delete(vxc1); cpl_vector_delete(vxc3); if (stream != stdout) cpl_test_zero( fclose(stream) ); /* End of tests */ return cpl_test_end(0); } /**@}*/ /*----------------------------------------------------------------------------*/ /** @brief Benchmark the CPL function @param n The number of repeats @return void */ /*----------------------------------------------------------------------------*/ static void cpl_vector_corr_bench(int n) { double secs; const cpl_size nsize = 10*VECTOR_SIZE*VECTOR_SIZE; cpl_vector * cosinus = cpl_vector_new(nsize); cpl_vector * vxc = cpl_vector_new(5*VECTOR_SIZE | 1); double * data = cpl_vector_get_data(cosinus); cpl_flops flops0; const size_t bytes = (size_t)n * cpl_test_get_bytes_vector(cosinus); int i; /* Fill the vector cosinus */ for (i=0; i < nsize; i++) data[i] = cos(i*CPL_MATH_2PI/nsize); flops0 = cpl_tools_get_flops(); secs = cpl_test_get_walltime(); #ifdef _OPENMP #pragma omp parallel for private(i) #endif for (i = 0; i < n; i++) { cpl_vector_correlate(vxc, cosinus, cosinus); } secs = cpl_test_get_walltime() - secs; flops0 = cpl_tools_get_flops() - flops0; if (secs > 0.0) { cpl_msg_info("","Speed during %d correlations of size %" CPL_SIZE_FORMAT " in %g secs [Mflop/s]: %g (%g)", n, nsize, secs, flops0/secs/1e6, (double)flops0); cpl_msg_info(cpl_func,"Processing rate [MB/s]: %g", 1e-6 * (double)bytes / secs); } cpl_vector_delete(cosinus); cpl_vector_delete(vxc); } /*----------------------------------------------------------------------------*/ /** @brief Benchmark the CPL function @param n The number of repeats @return void */ /*----------------------------------------------------------------------------*/ static void cpl_vector_get_stdev_bench(int n) { double secs; const cpl_size nsize = 10 * VECTOR_SIZE*VECTOR_SIZE; cpl_vector * cosinus = cpl_vector_new(nsize); double * data = cpl_vector_get_data(cosinus); cpl_flops flops0; const size_t bytes = (size_t)n * 
cpl_test_get_bytes_vector(cosinus); int i; /* Fill the vector cosinus */ for (i=0; i < nsize; i++) data[i] = cos(i*CPL_MATH_2PI/nsize); flops0 = cpl_tools_get_flops(); secs = cpl_test_get_walltime(); #ifdef _OPENMP #pragma omp parallel for private(i) #endif for (i = 0; i < n; i++) { cpl_test_abs( cpl_vector_get_stdev(cosinus), sqrt(nsize*0.5/(nsize - 1)), 0.36 * DBL_EPSILON * sqrt(nsize)); } secs = cpl_test_get_walltime() - secs; flops0 = cpl_tools_get_flops() - flops0; if (secs > 0.0) { cpl_msg_info(cpl_func,"Speed during %d standard devs of size %" CPL_SIZE_FORMAT "in %g secs [Mflop/s]: %g (%g)", n, nsize, secs, flops0/secs/1e6, (double)flops0); cpl_msg_info(cpl_func,"Processing rate [MB/s]: %g", 1e-6 * (double)bytes / secs); } cpl_vector_delete(cosinus); } /*----------------------------------------------------------------------------*/ /** @brief Benchmark the CPL function @param n The number of repeats @return void */ /*----------------------------------------------------------------------------*/ static void cpl_vector_save_bench(int n) { const int nprops = 100; int i; double secs; const char * filename = "cpl_vector_save_bench.fits"; const double vval[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; const int nvals = (int)(sizeof(vval)/sizeof(double)); cpl_propertylist * qclist = cpl_propertylist_new(); char key[81]; const cpl_vector * vec; size_t bytes; CPL_DIAG_PRAGMA_PUSH_IGN(-Wcast-qual); vec = cpl_vector_wrap(nvals, (double*)vval); CPL_DIAG_PRAGMA_POP; bytes = (size_t)n * cpl_test_get_bytes_vector(vec); cpl_msg_info(cpl_func, "Benchmarking with %d %d-length vectors", n, nvals); for (i = 0; i < nprops; i++) { const int nlen = snprintf(key, 81, "ESO QC CARD%04d", i); cpl_test( nlen > 0 && nlen < 81); cpl_test_zero( cpl_propertylist_append_int(qclist, key, i)); } cpl_test_eq( cpl_propertylist_get_size(qclist), nprops); secs = cpl_test_get_cputime(); for (i = 0; i < n; i++) { cpl_test_zero( cpl_vector_save(vec, filename, CPL_TYPE_DOUBLE, qclist, 
CPL_IO_CREATE)); } secs = cpl_test_get_cputime() - secs; cpl_msg_info(cpl_func,"Time spent saving %d %d-sized vectors [s]: %g", n, nvals, secs); if (secs > 0.0) { cpl_msg_info(cpl_func,"Processing rate [MB/s]: %g", 1e-6 * (double)bytes / secs); } cpl_test_fits(filename); cpl_test_zero( remove(filename) ); CPL_DIAG_PRAGMA_PUSH_IGN(-Wcast-qual); cpl_vector_unwrap((cpl_vector*)vec); CPL_DIAG_PRAGMA_POP; cpl_propertylist_delete(qclist); return; } /*----------------------------------------------------------------------------*/ /** @brief Reproduce DFS06126, original version by H. Lorch @param stream Output dump stream @return void */ /*----------------------------------------------------------------------------*/ static void cpl_vector_fit_gaussian_test_one(FILE * stream) { const int N = 50; cpl_vector *yval = cpl_vector_new(N); cpl_vector *xval = cpl_vector_new(N); cpl_vector *ysig = cpl_vector_new(N); cpl_matrix *cov = NULL; cpl_matrix *matrix = NULL; const double in_sigma = 10.0, in_centre = 25.0, peak = 769.52; int n; double pos = 0.0, centre, offset, sigma, area, mse, chisq; cpl_error_code error; for (n = 0; n < N; n++) { const double d = (double)pos - in_centre; error = cpl_vector_set(xval, n, pos); cpl_test_eq_error(error, CPL_ERROR_NONE); error = cpl_vector_set(yval, n, peak*exp(-d*d/(2.0*in_sigma*in_sigma))); cpl_test_eq_error(error, CPL_ERROR_NONE); /* the following line seems to make it fail. * normally, it should have no influence at all since all sigmas * are the same. strangely, using 1.0/sqrt(N-1) also fails, * but modifying this value slightly (e.g. by adding 1e-6) * lets the fitting succeed. is there a meaning in the failure * for 1.0/sqrt(integer)? 
*/ error = cpl_vector_set(ysig, n, 1.0/sqrt(N)); cpl_test_eq_error(error, CPL_ERROR_NONE); pos += 1.0; /* create one missing value, * this has no special meaning, just replicates the generation of * the test data with which I found the problem */ if (n == 34) pos += 1.0; } cpl_vector_dump(xval, stream); cpl_vector_dump(yval, stream); cpl_vector_dump(ysig, stream); error = cpl_vector_fit_gaussian(xval, NULL, yval, ysig, CPL_FIT_ALL, &centre, &sigma, &area, &offset, &mse, &chisq, &cov); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_msg_info(cpl_func, "%d-length Gaussian fit, center: %g", N, centre); cpl_msg_info(cpl_func, "%d-length Gaussian fit, sigma: %g", N, sigma); cpl_msg_info(cpl_func, "%d-length Gaussian fit, area: %g", N, area); cpl_msg_info(cpl_func, "%d-length Gaussian fit, offset: %g", N, offset); cpl_msg_info(cpl_func, "%d-length Gaussian fit, MSE: %g", N, mse); cpl_msg_info(cpl_func, "%d-length Gaussian fit, chisq: %g", N, chisq); /* The covariance matrix must be 4 X 4, symmetric, positive definite */ cpl_test_nonnull(cov); cpl_test_eq(cpl_matrix_get_nrow(cov), 4); cpl_test_eq(cpl_matrix_get_ncol(cov), 4); matrix = cpl_matrix_transpose_create(cov); cpl_test_matrix_abs(cov, matrix, DBL_EPSILON); error = cpl_matrix_decomp_chol(matrix); cpl_test_eq_error(error, CPL_ERROR_NONE); cpl_matrix_dump(cov, stream); cpl_vector_delete(yval); cpl_vector_delete(ysig); cpl_vector_delete(xval); cpl_matrix_delete(cov); cpl_matrix_delete(matrix); } /*----------------------------------------------------------------------------*/ /** @brief Perform a number of benchmarks @param a One pre-allocated vector @param b A second pre-allocated vector of the same length @return void */ /*----------------------------------------------------------------------------*/ static void cpl_vector_valarray_bench_one(cpl_vector * a, cpl_vector * b) { const cpl_size n = cpl_vector_get_size(a); double cputime, tstop; const double tstart = cpl_test_get_cputime(); cpl_vector_add(a, b); 
cpl_vector_subtract(a, b); cpl_vector_multiply(a, b); cpl_vector_divide(a, b); cpl_vector_add_scalar(a, 10); cpl_vector_subtract_scalar(a, 10); cpl_vector_multiply_scalar(a, 10); cpl_vector_divide_scalar(a, 10); cpl_vector_get_min(a); cpl_vector_get_max(a); cpl_vector_get_sum(a); cpl_vector_get_mean(a); cpl_vector_get_stdev(a); tstop = cpl_test_get_cputime(); cputime = tstop - tstart; cpl_msg_info(cpl_func, "valarray-test. n=%u. CPU-time [ms]: %g", (unsigned)n, 1e3 * cputime); } /*----------------------------------------------------------------------------*/ /** @brief Perform a number of benchmarks of a given length @param n The length of the vector(s) to benchmark @return void */ /*----------------------------------------------------------------------------*/ static void cpl_vector_valarray_bench(cpl_size n) { cpl_vector * a = cpl_vector_new(n); cpl_vector * b = cpl_vector_new(n); cpl_vector_fill(a, 10.0); cpl_vector_fill(b, 15.0); cpl_vector_valarray_bench_one(a, b); cpl_vector_delete(a); cpl_vector_delete(b); } /*----------------------------------------------------------------------------*/ /** @brief Test the CPL function @param n The length of the vector(s) to test @return void */ /*----------------------------------------------------------------------------*/ static void cpl_vector_cycle_test(cpl_size n) { const cpl_boolean do_plot = cpl_msg_get_level() <= CPL_MSG_INFO ? 
CPL_TRUE : CPL_FALSE; cpl_vector* src; cpl_vector* dest; cpl_error_code code; src = cpl_vector_new(n); dest = cpl_vector_new(n + 1); /* Shift something non-constant, so the correctness can be verified */ code = cpl_vector_fill_kernel_profile(src, CPL_KERNEL_SINC, 1.0); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_vector_cycle(NULL, NULL, 1.0); cpl_test_eq_error(code, CPL_ERROR_NULL_INPUT); code = cpl_vector_cycle(NULL, src, 1.0); cpl_test_eq_error(code, CPL_ERROR_NULL_INPUT); code = cpl_vector_cycle(dest, src, 1.0); cpl_test_eq_error(code, CPL_ERROR_INCOMPATIBLE_INPUT); cpl_vector_delete(dest); dest = cpl_vector_new(n); code = cpl_vector_cycle(dest, src, CPL_MATH_SQRT2 + (double)n/3.0); cpl_test_eq_error(code, CPL_ERROR_NONE); if (do_plot) { code = cpl_plot_vector("", "w lines", "", dest); cpl_test_error(code); } code = cpl_vector_cycle(dest, NULL, -CPL_MATH_SQRT2 - (double)n/3.0); cpl_test_eq_error(code, CPL_ERROR_NONE); if (cpl_tools_is_power_of_2(n)) { cpl_test_vector_abs(dest, src, 0.5 / n); } else { cpl_test_vector_abs(dest, src, 10.0 * DBL_EPSILON); } if (do_plot) { code = cpl_vector_subtract(dest, src); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_plot_vector("", "w lines", "", dest); cpl_test_error(code); } code = cpl_vector_cycle(dest, src, 0.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 0.0); code = cpl_vector_cycle(dest, NULL, 0.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 0.0); /* Should not alias input, but it is supported */ code = cpl_vector_cycle(dest, dest, 0.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 0.0); code = cpl_vector_cycle(dest, src, -1.0); cpl_test_eq_error(code, CPL_ERROR_NONE); if (n > 1) { cpl_test_abs(cpl_vector_get(dest, n-1), cpl_vector_get(src, 0), 0.0); cpl_test_abs(cpl_vector_get(dest, 0), cpl_vector_get(src, 1), 0.0); } code = cpl_vector_cycle(dest, NULL, 1.0); cpl_test_eq_error(code, CPL_ERROR_NONE); 
cpl_test_vector_abs(dest, src, 0.0); code = cpl_vector_cycle(dest, NULL, -2.0); cpl_test_eq_error(code, CPL_ERROR_NONE); if (n > 2) { cpl_test_abs(cpl_vector_get(dest, n-2), cpl_vector_get(src, 0), 0.0); cpl_test_abs(cpl_vector_get(dest, 0), cpl_vector_get(src, 2), 0.0); } code = cpl_vector_cycle(dest, NULL, 2.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 0.0); /* Perform a range of shifts, that will cancel each other out */ for (cpl_size i = -1 - 2 * n; i <= 1 + 2 * n; i++) { code = cpl_vector_cycle(dest, NULL, i); cpl_test_eq_error(code, CPL_ERROR_NONE); } cpl_test_vector_abs(dest, src, 0.0); /* Fill the vector with a sine curve - any linear combination of full sine and cosine curves will be cycled accurately... */ for (cpl_size i=0; i < n; i++) { const double value = sin(i * CPL_MATH_2PI / n); code = cpl_vector_set(src, i, value); cpl_test_eq_error(code, CPL_ERROR_NONE); } code = cpl_vector_cycle(dest, src, CPL_MATH_E + (double)n/5.0); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_vector_cycle(dest, NULL, -CPL_MATH_E - (double)n/5.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 20.0 * DBL_EPSILON); /* Vectors of length 1 */ code = cpl_vector_set_size(src, 1); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_vector_set_size(dest, 1); cpl_test_eq_error(code, CPL_ERROR_NONE); code = cpl_vector_cycle(dest, src, 0.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 0.0); code = cpl_vector_cycle(dest, src, 1.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 0.0); code = cpl_vector_cycle(dest, src, -1.0); cpl_test_eq_error(code, CPL_ERROR_NONE); cpl_test_vector_abs(dest, src, 0.0); cpl_vector_delete(src); cpl_vector_delete(dest); }
vect-outer-simd-2.c
/* { dg-require-effective-target vect_simd_clones } */ /* { dg-additional-options "-fopenmp-simd -ffast-math" } */ #include <stdlib.h> #include "tree-vect.h" #define N 64 float *px, *py; float *tx, *ty; float *x1, *z1, *t1, *t2; static void inline bar (const float cx, float cy, float *vx, float *vy) { int j; for (j = 0; j < N; ++j) { const float dx = cx - px[j]; const float dy = cy - py[j]; *vx -= dx * tx[j]; *vy -= dy * ty[j]; } } __attribute__((noinline, noclone)) void foo1 (int n) { int i; #pragma omp simd for (i=0; i<n; i++) bar (px[i], py[i], x1+i, z1+i); } __attribute__((noinline, noclone)) void foo2 (int n) { volatile int i; for (i=0; i<n; i++) bar (px[i], py[i], x1+i, z1+i); } int main () { float *X = (float*)malloc (N * 8 * sizeof (float)); int i; int n = N - 1; check_vect (); px = &X[0]; py = &X[N * 1]; tx = &X[N * 2]; ty = &X[N * 3]; x1 = &X[N * 4]; z1 = &X[N * 5]; t1 = &X[N * 6]; t2 = &X[N * 7]; for (i=0; i<N; i++) { px[i] = (float) (i+2); tx[i] = (float) (i+1); py[i] = (float) (i+4); ty[i] = (float) (i+3); x1[i] = z1[i] = 1.0f; } foo1 (n); /* vector variant. */ for (i=0; i<N;i++) { t1[i] = x1[i]; x1[i] = 1.0f; t2[i] = z1[i]; z1[i] = 1.0f; } foo2 (n); /* scalar variant. */ for (i=0; i<N; i++) if (x1[i] != t1[i] || z1[i] != t2[i]) abort (); return 0; } /* { dg-final { scan-tree-dump "OUTER LOOP VECTORIZED" "vect" } } */
test_utils.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string> #include <sstream> #include <iostream> #include <iomanip> #include <algorithm> #include <limits> #include <utility> #include <cstdint> #include <cstdlib> #include <map> extern "C" { #include "mmio.h" } #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <library_types.h> #include <thrust/host_vector.h> #include <thrust/adjacent_difference.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <rmm_utils.h> #include "cugraph.h" #ifndef CUDA_RT_CALL #define CUDA_RT_CALL( call ) \ { \ cudaError_t cudaStatus = call; \ if ( cudaSuccess != cudaStatus ) { \ fprintf(stderr, "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } \ } #endif std::function<void(gdf_column*)> gdf_col_deleter = [](gdf_column* col){ if (col) { col->size = 0; if(col->data){ cudaStream_t stream{nullptr}; ALLOC_FREE_TRY(col->data, stream); } delete col; } }; using gdf_column_ptr = typename std::unique_ptr<gdf_column, decltype(gdf_col_deleter)>; std::function<void(gdf_graph*)> gdf_graph_deleter = [](gdf_graph* G){delete G;}; using gdf_graph_ptr = typename std::unique_ptr<gdf_graph,decltype(gdf_graph_deleter)>; std::string 
getFileName(const std::string& s) {
  // Return the basename of a path (text after the last separator),
  // or "" when the path has no separator at all.
  char sep = '/';

#ifdef _WIN32
  sep = '\\';
#endif

  size_t i = s.rfind(sep, s.length());
  if (i != std::string::npos) {
    return(s.substr(i+1, s.length() - i));
  }

  return("");
}

// Print every index at which two equally-sized vectors differ, as
// "[i] : v1[i] vs. v2[i]".
template <typename T>
void verbose_diff(std::vector<T> & v1, std::vector<T> & v2) {
  for (unsigned int i = 0; i < v1.size(); ++i) {
    if (v1[i] != v2[i]) {
      std::cout << "[" << i <<"] : " << v1[i] << " vs. "<< v2[i]<<std::endl;
    }
  }
}

// Equality check with diagnostics: return 0 when the vectors are equal,
// otherwise print the differing elements and return 1.
template <typename T>
int eq(std::vector<T> & v1, std::vector<T> & v2) {
  if (v1 == v2)
    return 0;
  else {
    verbose_diff(v1,v2);
    return 1;
  }
}

// Print n elements of a device array starting at offset, space-separated,
// by copying through a thrust::device_ptr to std::cout.
template <typename T>
void printv(size_t n, T* vec, int offset) {
  thrust::device_ptr<T> dev_ptr(vec);
  std::cout.precision(15);
  std::cout << "sample size = "<< n << ", offset = "<< offset << std::endl;
  thrust::copy(dev_ptr+offset,dev_ptr+offset+n,
               std::ostream_iterator<T>(std::cout, " "));//Assume no RMM dependency; TODO: check / test (potential BUG !!!!!)
  std::cout << std::endl;
}

// Fill a host vector with reproducible pseudo-random values in [0, 10)
// (srand is re-seeded with 42 on every call, so results are deterministic).
template <typename T>
void random_vals(std::vector<T> & v) {
  srand(42);
  for (auto i = 0; i < v.size(); i++)
    v[i]=static_cast<T>(std::rand()%10);
}

// Reference CPU conversion of an m x n CSR matrix (nnz entries, index
// base 0 or 1) to CSC.  csrVals/cscVals may be NULL to convert only the
// sparsity pattern.  Output arrays must be pre-allocated by the caller.
template <typename T_ELEM>
void ref_csr2csc (int m, int n, int nnz, const T_ELEM *csrVals,
                  const int *csrRowptr, const int *csrColInd,
                  T_ELEM *cscVals, int *cscRowind, int *cscColptr,
                  int base=0){
  int i,j, row, col, index;
  int * counters;
  T_ELEM val;

  /* early return */
  if ((m <= 0) || (n <= 0) || (nnz <= 0)){
    return;
  }

  /* build compressed column pointers (histogram of column counts,
     then an inclusive prefix sum) */
  memset(cscColptr, 0, (n+1)*sizeof(cscColptr[0]));
  cscColptr[0]=base;
  for (i=0; i<nnz; i++){
    cscColptr[1+csrColInd[i]-base]++;
  }
  for(i=0; i<n; i++){
    cscColptr[i+1]+=cscColptr[i];
  }

  /* expand row indices and copy them and values into csc arrays
     according to the permutation; counters[] tracks how many entries
     of each column have been placed so far */
  counters = (int *)malloc(n*sizeof(counters[0]));
  memset(counters, 0, n*sizeof(counters[0]));
  for (i=0; i<m; i++){
    for (j=csrRowptr[i]; j<csrRowptr[i+1]; j++){
      row = i+base;
      col = csrColInd[j-base];
      index=cscColptr[col-base]-base+counters[col-base];
      counters[col-base]++;
      cscRowind[index]=row;
      if(csrVals!=NULL || cscVals!=NULL){
        val = csrVals[j-base];
        cscVals[index] = val;
      }
    }
  }
  free(counters);
}

// Turn a CSR adjacency matrix into a row-stochastic transition matrix
// in-place: every nonzero of row r receives weight 1/degree(r); rows with
// no entries are flagged via is_leaf[r]=1.  Always returns 0.
// NOTE(review): parameters e and csrColIndA are unused here — presumably
// kept for signature symmetry with related helpers; confirm with callers.
template <typename T>
int transition_matrix_cpu(int n, int e, int *csrRowPtrA, int *csrColIndA, T *weight, T* is_leaf)
//omp_set_num_threads(4);
//#pragma omp parallel
{
  int j,row, row_size;
  //#pragma omp for
  for (row=0; row<n; row++) {
    row_size = csrRowPtrA[row+1] - csrRowPtrA[row];
    if (row_size == 0)
      is_leaf[row]=1.0;
    else {
      is_leaf[row]=0.0;
      for (j=csrRowPtrA[row]; j<csrRowPtrA[row+1]; j++)
        weight[j] = 1.0/row_size;
    }
  }
  return 0;
}

// Pretty-print an m x n CSR matrix (uint16_t column indices) as a dense
// grid, two decimals per entry, one row per line.
template <typename T>
void printCsrMatI(int m, int n, int nnz,std::vector<int> & csrRowPtr,
                  std::vector<uint16_t> & csrColInd, std::vector<T> & csrVal) {
  std::vector<T> v(n);
  std::stringstream ss;
  ss.str(std::string());
  ss << std::fixed;
  ss << std::setprecision(2);
  for (int i = 0; i < m; i++) {
    // Scatter the i-th sparse row into a zeroed dense buffer.
    std::fill(v.begin(),v.end(),0);
    for (int j = csrRowPtr[i]; j < csrRowPtr[i+1]; j++)
      v[csrColInd[j]] = csrVal[j];
    std::copy(v.begin(), v.end(), std::ostream_iterator<int>(ss, " "));
    ss << "\n";
  }
  ss << "\n";
  std::cout<<ss.str();
}

/// Read matrix properties from Matrix Market file
/** Matrix Market file is assumed to be a sparse matrix in coordinate
 * format.
 *
 * @param f File stream for Matrix Market file.
 * @param tg Boolean indicating whether to convert matrix to general
 * format (from symmetric, Hermitian, or skew symmetric format).
 * @param t (Output) MM_typecode with matrix properties.
 * @param m (Output) Number of matrix rows.
 * @param n (Output) Number of matrix columns.
 * @param nnz (Output) Number of non-zero matrix entries.
 * @return Zero if properties were read successfully. Otherwise
 * non-zero.
*/ template <typename IndexType_> int mm_properties(FILE * f, int tg, MM_typecode * t, IndexType_ * m, IndexType_ * n, IndexType_ * nnz) { // Read matrix properties from file int mint, nint, nnzint; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(*t) || !mm_is_coordinate(*t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&mint,&nint,&nnzint)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(*t) && !mm_is_real(*t) && !mm_is_integer(*t) && !mm_is_complex(*t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } *m = mint; *n = nint; *nnz = nnzint; // Find total number of non-zero entries if(tg && !mm_is_general(*t)) { // Non-diagonal entries should be counted twice IndexType_ nnzOld = *nnz; *nnz *= 2; // Diagonal entries should not be double-counted int i; int st; for(i=0; i<nnzOld; ++i) { // Read matrix entry IndexType_ row, col; double rval, ival; if (mm_is_pattern(*t)) st = fscanf(f, "%d %d\n", &row, &col); else if (mm_is_real(*t) || mm_is_integer(*t)) st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Check if entry is diagonal if(row == col) --(*nnz); } } return 0; } /// Read Matrix Market file and convert to COO format matrix /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param nnz Number of non-zero matrix entries. 
* @param cooRowInd (Output) Row indices for COO matrix. Should have * at least nnz entries. * @param cooColInd (Output) Column indices for COO matrix. Should * have at least nnz entries. * @param cooRVal (Output) Real component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @param cooIVal (Output) Imaginary component of COO matrix * entries. Should have at least nnz entries. Ignored if null * pointer. * @return Zero if matrix was read successfully. Otherwise non-zero. */ template <typename IndexType_, typename ValueType_> int mm_to_coo(FILE *f, int tg, IndexType_ nnz, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal , ValueType_ * cooIVal) { // Read matrix properties from file MM_typecode t; int m, n, nnzOld; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,&t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(t) || !mm_is_coordinate(t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&m,&n,&nnzOld)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(t) && !mm_is_real(t) && !mm_is_integer(t) && !mm_is_complex(t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } // Add each matrix entry in file to COO format matrix IndexType_ i; // Entry index in Matrix Market file IndexType_ j = 0; // Entry index in COO format matrix for(i=0;i<nnzOld;++i) { // Read entry from file int row, col; double rval, ival; int st; if (mm_is_pattern(t)) { st = fscanf(f, "%d %d\n", &row, &col); rval = 1.0; ival = 0.0; } else if (mm_is_real(t) || mm_is_integer(t)) { st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); ival = 0.0; } else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error 
%d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Switch to 0-based indexing --row; --col; // Record entry cooRowInd[j] = row; cooColInd[j] = col; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; // Add symmetric complement of non-diagonal entries if(tg && !mm_is_general(t) && (row!=col)) { // Modify entry value if matrix is skew symmetric or Hermitian if(mm_is_skew(t)) { rval = -rval; ival = -ival; } else if(mm_is_hermitian(t)) { ival = -ival; } // Record entry cooRowInd[j] = col; cooColInd[j] = row; if(cooRVal != NULL) cooRVal[j] = rval; if(cooIVal != NULL) cooIVal[j] = ival; ++j; } } return 0; } /// Compare two tuples based on the element indexed by i class lesser_tuple { const int i; public: lesser_tuple(int _i) : i(_i) {} template<typename Tuple1, typename Tuple2> __host__ __device__ bool operator()(const Tuple1 t1, const Tuple2 t2) { switch(i) { case 0: return (thrust::get<0>(t1) < thrust::get<0>(t2)); case 1: return (thrust::get<1>(t1) < thrust::get<1>(t2)); default: return (thrust::get<0>(t1) < thrust::get<0>(t2)); } } }; /// Sort entries in COO format matrix /** Sort is stable. * * @param nnz Number of non-zero matrix entries. * @param sort_by_row Boolean indicating whether matrix entries * will be sorted by row index or by column index. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component for COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component COO matrix entries. Ignored if * null pointer. 
*/ template <typename IndexType_, typename ValueType_> void coo_sort(IndexType_ nnz, int sort_by_row, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal, ValueType_ * cooIVal) { // Determine whether to sort by row or by column int i; if(sort_by_row == 0) i = 1; else i = 0; // Apply stable sort using namespace thrust; if((cooRVal==NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz)), lesser_tuple(i)); else if((cooRVal==NULL) && (cooIVal!=NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooIVal+nnz)), lesser_tuple(i)); else if((cooRVal!=NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooRVal+nnz)), lesser_tuple(i)); else stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz, cooRVal+nnz,cooIVal+nnz)), lesser_tuple(i)); } template <typename IndexT> void coo2csr(std::vector<IndexT>& cooRowInd, //in: I[] (overwrite) const std::vector<IndexT>& cooColInd, //in: J[] std::vector<IndexT>& csrRowPtr, //out std::vector<IndexT>& csrColInd) //out { std::vector<std::pair<IndexT,IndexT> > items; for (auto i = 0; i < cooRowInd.size(); ++i) items.push_back(std::make_pair( cooRowInd[i], cooColInd[i])); //sort pairs std::sort(items.begin(), items.end(),[](const std::pair<IndexT,IndexT> &left, const std::pair<IndexT,IndexT> &right) {return left.first < right.first; }); for (auto i = 0; i < cooRowInd.size(); ++i) { cooRowInd[i]=items[i].first; // save the sorted rows to compress them later csrColInd[i]=items[i].second; // save the col idx, not sure if they are sorted for each row } // Count number of elements per row for(auto i=0; i<cooRowInd.size(); ++i) ++(csrRowPtr[cooRowInd[i]+1]); // Compute 
cumulative sum to obtain row offsets/pointers for(auto i=0; i<csrRowPtr.size()-1; ++i) csrRowPtr[i+1] += csrRowPtr[i]; } /// Compress sorted list of indices /** For use in converting COO format matrix to CSR or CSC format. * * @param n Maximum index. * @param nnz Number of non-zero matrix entries. * @param sortedIndices Sorted list of indices (COO format). * @param compressedIndices (Output) Compressed list of indices (CSR * or CSC format). Should have at least n+1 entries. */ template <typename IndexType_> void coo_compress(IndexType_ m, IndexType_ n, IndexType_ nnz, const IndexType_ * __restrict__ sortedIndices, IndexType_ * __restrict__ compressedIndices) { IndexType_ i; // Initialize everything to zero memset(compressedIndices, 0, (m+1)*sizeof(IndexType_)); // Count number of elements per row for(i=0; i<nnz; ++i) ++(compressedIndices[sortedIndices[i]+1]); // Compute cumulative sum to obtain row offsets/pointers for(i=0; i<m; ++i) compressedIndices[i+1] += compressedIndices[i]; } /// Convert COO format matrix to CSR format /** On output, matrix entries in COO format matrix will be sorted * (primarily by row index, secondarily by column index). * * @param m Number of matrix rows. * @param n Number of matrix columns. * @param nnz Number of non-zero matrix entries. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component of COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component of COO matrix entries. Ignored * if null pointer. * @param csrRowPtr Row pointers for CSR matrix. Should have at least * n+1 entries. * @param csrColInd Column indices for CSR matrix (identical to * output of cooColInd). Should have at least nnz entries. Ignored if * null pointer. * @param csrRVal Real component of CSR matrix entries (identical to * output of cooRVal). Should have at least nnz entries. Ignored if * null pointer. 
* @param csrIVal Imaginary component of CSR matrix entries * (identical to output of cooIVal). Should have at least nnz * entries. Ignored if null pointer. * @return Zero if matrix was converted successfully. Otherwise * non-zero. */ template <typename IndexType_, typename ValueType_> int coo_to_csr(IndexType_ m, IndexType_ n, IndexType_ nnz, IndexType_ * __restrict__ cooRowInd, IndexType_ * __restrict__ cooColInd, ValueType_ * __restrict__ cooRVal, ValueType_ * __restrict__ cooIVal, IndexType_ * __restrict__ csrRowPtr, IndexType_ * __restrict__ csrColInd, ValueType_ * __restrict__ csrRVal, ValueType_ * __restrict__ csrIVal) { // Convert COO to CSR matrix coo_sort(nnz, 0, cooRowInd, cooColInd, cooRVal, cooIVal); coo_sort(nnz, 1, cooRowInd, cooColInd, cooRVal, cooIVal); //coo_sort2<int,float>(m, nnz, cooRowInd, cooColInd); coo_compress(m, n, nnz, cooRowInd, csrRowPtr); // Copy arrays if(csrColInd!=NULL) memcpy(csrColInd, cooColInd, nnz*sizeof(IndexType_)); if((cooRVal!=NULL) && (csrRVal!=NULL)) memcpy(csrRVal, cooRVal, nnz*sizeof(ValueType_)); if((cooIVal!=NULL) && (csrIVal!=NULL)) memcpy(csrIVal, cooIVal, nnz*sizeof(ValueType_)); return 0; } int read_binary_vector ( FILE* fpin, int n, std::vector<float>& val ) { size_t is_read1; double* t_storage = new double[n]; is_read1 = fread(t_storage, sizeof(double), n, fpin); for (int i = 0; i < n; i++) { if (t_storage[i] == DBL_MAX) val[i] = FLT_MAX; else if (t_storage[i] == -DBL_MAX) val[i] = -FLT_MAX; else val[i] = static_cast<float>(t_storage[i]); } delete[] t_storage; if (is_read1 != (size_t)n) { printf("%s", "I/O fail\n"); return 1; } return 0; } int read_binary_vector ( FILE* fpin, int n, std::vector<double>& val ) { size_t is_read1; is_read1 = fread(&val[0], sizeof(double), n, fpin); if (is_read1 != (size_t)n) { printf("%s", "I/O fail\n"); return 1; } return 0; } // Creates a gdf_column from a std::vector template <typename col_type> gdf_column_ptr create_gdf_column(std::vector<col_type> const & host_vector) { // 
Create a new instance of a gdf_column with a custom deleter that will free // the associated device memory when it eventually goes out of scope gdf_column_ptr the_column{new gdf_column, gdf_col_deleter}; // Allocate device storage for gdf_column and copy contents from host_vector const size_t input_size_bytes = host_vector.size() * sizeof(col_type); cudaStream_t stream{nullptr}; ALLOC_TRY((void**)&(the_column->data), input_size_bytes, stream); cudaMemcpy(the_column->data, host_vector.data(), input_size_bytes, cudaMemcpyHostToDevice); // Deduce the type and set the gdf_dtype accordingly gdf_dtype gdf_col_type; if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32; else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64; // Fill the gdf_column members the_column->valid = nullptr; the_column->null_count = 0; the_column->size = host_vector.size(); the_column->dtype = gdf_col_type; gdf_dtype_extra_info extra_info; extra_info.time_unit = TIME_UNIT_NONE; the_column->dtype_info = extra_info; return the_column; } // Creates a gdf_column from a std::vector template <typename col_type> void create_gdf_column(std::vector<col_type> const & host_vector, gdf_column * the_column) { // Allocate device storage for gdf_column and copy contents from host_vector const size_t input_size_bytes = host_vector.size() * sizeof(col_type); cudaStream_t stream{nullptr}; 
ALLOC_TRY((void**)&(the_column->data), input_size_bytes, stream); cudaMemcpy(the_column->data, host_vector.data(), input_size_bytes, cudaMemcpyHostToDevice); // Deduce the type and set the gdf_dtype accordingly gdf_dtype gdf_col_type; if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8; else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16; else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32; else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64; else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32; else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64; // Fill the gdf_column members the_column->valid = nullptr; the_column->null_count = 0; the_column->size = host_vector.size(); the_column->dtype = gdf_col_type; gdf_dtype_extra_info extra_info; extra_info.time_unit = TIME_UNIT_NONE; the_column->dtype_info = extra_info; } void gdf_col_delete(gdf_column* col) { if (col) { col->size = 0; cudaStream_t stream{nullptr}; if(col->data) ALLOC_FREE_TRY(col->data, stream); #if 1 // If delete col is executed, the memory pointed by col is no longer valid and // can be used in another memory allocation, so executing col->data = nullptr // after delete col is dangerous, also, col = nullptr has no effect here (the // address is passed by value, for col = nullptr should work, the input // parameter should be gdf_column*& col (or alternatively, gdf_column** col and // *col = nullptr also work) col->data = nullptr; delete col; #else delete col; col->data = nullptr; col = nullptr; #endif } } //////////////////////////////////////////////////////////////////////////////// // TODO: move this 
code to rapids-core //////////////////////////////////////////////////////////////////////////////// // Define RAPIDS_DATASET_ROOT_DIR using a preprocessor variable to // allow for a build to override the default. This is useful for // having different builds for specific default dataset locations. #ifndef RAPIDS_DATASET_ROOT_DIR #define RAPIDS_DATASET_ROOT_DIR "/datasets" #endif static const std::string& get_rapids_dataset_root_dir() { static std::string rdrd(""); // Env var always overrides the value of RAPIDS_DATASET_ROOT_DIR if (rdrd == "") { const char* envVar = std::getenv("RAPIDS_DATASET_ROOT_DIR"); rdrd = (envVar != NULL) ? envVar : RAPIDS_DATASET_ROOT_DIR; } return rdrd; }
GB_unop__identity_int16_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int16_int64)
// op(A') function:  GB (_unop_tran__identity_int16_int64)

// C type:   int16_t
// A type:   int64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the cast below does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting (int64_t -> int16_t, truncating)
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int64_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ;     \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int16_int64)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int16_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template is specialized by the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lu2.c
// lu2.c
//
// test program for blocked LU decomposition
//
// Time-stamp: <11/05/05 16:59:38 makino>

//#define NOBLAS

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <getopt.h>
#include <emmintrin.h>

// SSE2 vector of two doubles, plus a union for scalar access
typedef double v2df __attribute__((vector_size(16)));
typedef union {v2df v; double s[2];}v2u;

#include <lu2tlib.h>
#include <lu2lib.h>

void timer_init();
double cpusec();
double wsec();

// Row stride of the augmented matrix: n columns + RHS + padding so that
// rows stay 16-byte aligned for the v2df accesses below.
#define RDIM (n+16)

// Copy matrix a into a2 (n rows; n+2 columns — the matrix, the RHS
// column, and one extra — TODO confirm the purpose of the extra column).
void copymats( int n, double a[n][RDIM], double a2[n][RDIM])
{
    int i, j;
    for(i=0;i<n;i++){
        for(j=0;j<n+2;j++) a2[i][j] = a[i][j];
    }
}

// Extract the RHS column (stored at column index n) into b.
void copybvect( int n, double a[][RDIM], double b[])
{
    int i;
    for(i=0;i<n;i++)b[i] = a[i][n];
}

// Verify a solution x against the original augmented system a:
// prints the maximum absolute residual |A x - b|.
void showresult(int n, double a[n][RDIM], double x[])
{
    int i, j;
    double emax = 0;
    for(i=0;i<n;i++){
        int k;
        double b2=0;
        //	printf("%3d: ", i);
        //	for(j=0;j<n;j++) printf(" %10.3e", a[i][j]);
        for(j=0;j<n;j++) b2 += a[i][j] * x[j];
        double err = b2-a[i][n];
        emax = (fabs(err) > emax) ? fabs(err):emax;
        //	printf(" %10.3e %10.3e %10.3e  %10.3e \n", x[i], a[i][n], b2, err);
    }
    printf("Emax= %10.3e\n", emax);
}

// Read an n x (n+1) augmented matrix from stdin.
// NOTE(review): the scanf return value is ignored, so a truncated or
// malformed input leaves trailing entries uninitialized.
void readmat( int n, double a[n][RDIM])
{
    int i, j;
    for(i=0;i<n;i++){
        for(j=0;j<n+1;j++) scanf("%le", &(a[i][j]));
    }
}

// Fill the augmented matrix (including the RHS column) with uniform
// random values in [-0.5, 0.5), seeded deterministically via srand48.
void randomsetmat( int n, int seed, double a[n][RDIM])
{
    long int i, j;
    srand48((long) seed);
    for(i=0;i<n;i++){
        //	printf("i=%d\n", i);
        double * ap = a[i];
        for(j=0;j<n;j++) {
            //	    ap[j]=drand48();
            ap[j]=drand48()-0.5;
        }
        //	printf("n, i=%d\n", i);
        //	a[i][n]=1;
        a[i][n]=drand48()-0.5;
    }
}

// Print the n x (n+1) augmented matrix.
void printmat( int n, double a[n][RDIM])
{
    int i, j;
    for(i=0;i<n;i++){
        printf("%3d: ", i);
        for(j=0;j<n+1;j++) printf(" %10.3e", a[i][j]);
        printf("\n");
    }
    printf("\n");
}

// Print an n x n (square, unpadded) matrix.
void printsqmat( int n, double a[n][n])
{
    int i, j;
    for(i=0;i<n;i++){
        printf("%3d: ", i);
        for(j=0;j<n;j++) printf(" %10.3e", a[i][j]);
        printf("\n");
    }
    printf("\n");
}

// Back substitution on the (unit upper triangular) factored matrix:
// b starts as the transformed RHS (column n of a) and ends as the solution.
void backward_sub(int n,double a[n][RDIM], double b[])
{
    int i,j,k;
    for (i=0;i<n;i++)b[i] = a[i][n];
    for(j=n-2;j>=0;j--)
        for(k=j+1;k<n;k++)
            b[j] -= b[k]*a[j][k];
}

// Reference (unblocked) LU solve with partial pivoting: factors the
// augmented matrix in place, then back-substitutes into b.
void lu( int n, double a[n][RDIM], double b[])
{
    int i, j, k;
    for(i=0;i<n-1;i++){
        // select pivot
        double amax = fabs(a[i][i]);
        int p=i;
        for(j=i+1;j<n;j++){
            if (fabs(a[j][i]) > amax){
                amax = fabs(a[j][i]);
                p = j;
            }
        }
        // exchange rows
        if (p != i){
            for(j=i;j<n+1;j++){
                double tmp = a[p][j];
                a[p][j] = a[i][j];
                a[i][j]=tmp;
            }
        }
        // normalize row i
        double ainv = 1.0/a[i][i];
        //	fprintf(stderr,"%d %e\n", i, ainv);
        for(k=i+1;k<n+1;k++) a[i][k]*= ainv;
        // subtract row i from all lower rows
        for(j=i+1;j<n;j++){
            //	    fprintf(stderr,"j=%d \n",j);
            for(k=i+1;k<n+1;k++) a[j][k] -= a[j][i] * a[i][k];
        }
    }
    // NOTE(review): this full-matrix print looks like leftover debug
    // output — it is O(n^2) I/O inside the solver.
    printmat(n,a);
    a[n-1][n] /= a[n-1][n-1];
    backward_sub(n,a,b);
}

// Return the index of the row with the largest |a[j][current]| for
// j >= current (partial-pivot search in column `current`).
int findpivot(int n, double a[n][RDIM], int current)
{
    double amax = fabs(a[current][current]);
    int i;
    int p=current;
    for(i=current+1;i<n;i++){
        if (fabs(a[i][current]) > amax){
            amax = fabs(a[i][current]);
            p = i;
        }
    }
    return p;
}

// Multiply columns [cstart, cend) of row `row` by `scale`, two v2df
// (four doubles) per iteration, with software prefetch.
void scalerow( int n, double a[n][RDIM], double scale,
               int row, int cstart, int cend)
{
    int j;
    BEGIN_TSC;
    int jmax = (cend+1-cstart)/2;
    v2df *a1 = (v2df*)(a[row]+cstart);
    v2df ss = (v2df){scale,scale};
    for(j=0;j<jmax;j+=2){
        __builtin_prefetch(a1+j+16,1,0);
        a1[j] *= ss;
        a1[j+1]*= ss;
    }
    END_TSC(t,1);
}

// Swap rows row1 and row2 over columns [cstart, cend) using v2df pairs.
void swaprows(int n, double a[n][RDIM], int row1, int row2,
              int cstart, int cend)
{
    /* WARNING: works only for row1 % 4 = 0 and RDIM >= n+4*/
    int j;
    if (row1 != row2){
        int jmax = (cend+1-cstart)/2;
#ifdef TIMETEST
        BEGIN_TSC;
#endif
        v2df *a1, *a2, tmp, tmp1;
        a1 = (v2df*)(a[row1]+cstart);
        a2 = (v2df*)(a[row2]+cstart);
        for(j=0;j<jmax;j+=2){
            tmp = a1[j];
            tmp1 = a1[j+1];
            a1[j]=a2[j];
            a1[j+1]=a2[j+1];
            a2[j]=tmp;
            a2[j+1]=tmp1;
            __builtin_prefetch(a1+j+16,1,0);
            __builtin_prefetch(a2+j+16,1,0);
            // prefetch options: 1: for write, 0: read only
            // 0: need not be kept in cache
            // 3: should be there for as long as possible
        }
#ifdef TIMETEST
        END_TSC(t,0);
#endif
    }
}

// Unrolled-by-one variant of swaprows (one v2df per iteration).
void swaprows_simple(int n, double a[n][RDIM], int row1, int row2,
                     int cstart, int cend)
{
    /* WARNING: works only for row1 % 4 = 0 and RDIM >= n+4*/
    int j;
    if (row1 != row2){
        int jmax = (cend+1-cstart)/2;
#if 1
        v2df *a1, *a2, tmp, tmp1;
        a1 = (v2df*)(a[row1]+cstart);
        a2 = (v2df*)(a[row2]+cstart);
        for(j=0;j<jmax;j++){
            tmp = a1[j];
            a1[j]=a2[j];
            a2[j]=tmp;
        }
#endif
#if 0
        for(j=cstart;j<cend;j++){
            double tmp = a[row1][j];
            a[row1][j]=a[row2][j];
            a[row2][j]=tmp;
        }
#endif
    }
}

// Fused swap+scale: exchange rows row1/row2 over [cstart, cend) while
// multiplying the data that lands in row2 by `scale`.  When row1 == row2
// this degenerates to a plain scalerow.
void swaprows_simple_with_scale(int n, double a[n][RDIM], int row1, int row2,
                                int cstart, int cend, double scale)
{
    /* WARNING: works only for row1 % 4 = 0 and RDIM >= n+4*/
    int j;
    if (row1 != row2){
        int jmax = (cend+1-cstart)/2;
#if 1
        v2df *a1, *a2, tmp, tmp1;
        v2df ss = (v2df){scale,scale};
        a1 = (v2df*)(a[row1]+cstart);
        a2 = (v2df*)(a[row2]+cstart);
        // main loop handles an even number of v2df's...
        for(j=0;j<(jmax & (0xfffffffe));j+=2){
            __builtin_prefetch(a1+j+32,1,0);
            __builtin_prefetch(a2+j+32,1,0);
            tmp = a1[j];
            a1[j]=a2[j];
            a2[j]=tmp*ss;
            tmp1 = a1[j+1];
            a1[j+1]=a2[j+1];
            a2[j+1]=tmp1*ss;
        }
        // ...and the odd tail element, if any
        if (jmax &1){
            tmp = a1[jmax-1];
            a1[jmax-1]=a2[jmax-1];
            a2[jmax-1]=tmp*ss;
        }
#endif
#if 0
        for(j=cstart;j<cend;j++){
            double tmp = a[row1][j];
            a[row1][j]=a[row2][j];
            a[row2][j]=tmp;
        }
#endif
    }else{
        scalerow(n,a,scale ,row2,cstart,cend);
    }
}

// Exchange two scalar entries of b.
void swapelements(double b[], int i1, int i2)
{
    double tmp;
    tmp=b[i1];
    b[i1]=b[i2];
    b[i2]=tmp;
}

// Rank-1 update: for rows [r0,r1) and columns [c0,c1),
// a[j][k] -= a[j][current] * a[current][k].
void vvmulandsub(int n, double a[n][RDIM], int current,
                 int c0, int c1, int r0,int r1)
{
    int j,k;
    for(j=r0;j<r1;j++)
        for (k=c0;k<c1;k++)
            a[j][k] -= a[j][current]*a[current][k];
}

// Blocked update A[r0:r1, c0:c1] -= A[r0:r1, m0:m1] * A[m0:m1, c0:c1],
// dispatching to a small-k kernel, BLAS dgemm, or plain loops.
void mmmulandsub(int n, double a[n][RDIM], int m0, int m1,
                 int c0, int c1, int r0,int r1)
{
    int j,k,l;
    if (c1-c0 <16){
        int np=n+1;
        matmul_for_small_nk(RDIM, (double(*)[]) (&a[r0][m0]),
                            RDIM, (double(*)[]) (&a[m0][c0]),
                            RDIM, (double(*)[]) (&a[r0][c0]),
                            r1-r0, m1-m0, c1-c0);
    }else{
#ifndef NOBLAS
        mydgemm(r1-r0, c1-c0, m1-m0, -1.0, &(a[r0][m0]), RDIM,
                &(a[m0][c0]), RDIM, 1.0, &(a[r0][c0]), RDIM );
        // example:
        // r0, m0 = i+m,i
        // m0, c0 = i, i+m
        // r0, c0 = i+m, i+m
        //r1-r0 =  n-i-m
        // c1-c0 = iend-i-m
        // m1-m0 = m
#else
        for(j=r0;j<r1;j++)
            for (k=c0;k<c1;k++)
                for (l=m0; l<m1; l++)
                    a[j][k] -= a[j][l]*a[l][k];
#endif
    }
}

// Running count of pivot row swaps (diagnostic).
static int nswap;

// Panel factorization of columns [i, i+m): threshold pivoting (swap only
// when the candidate pivot is > 2x the diagonal), row scaling, and
// rank-1 updates of the trailing rows.  pv records the chosen pivot rows.
void column_decomposition(int n, double a[n][RDIM], int m,
                          int pv[], int i)
{
    int j, k;
    int ip,ii;
    double ainv;
    for(ip=0;ip<m;ip++){
        ii=i+ip;
        int p = findpivot(n,a,ii);
        if (fabs(a[p][ii]) > 2* fabs(a[ii][ii])){
            pv[ip]=p;
            swaprows(n,a,p,ii,i,i+m);
            nswap++;
        }else{
            pv[ip]=ii;
        }
        // normalize row ii
        ainv = 1.0/a[ii][ii];
        scalerow(n,a,ainv,ii,i,ii);
        scalerow(n,a,ainv,ii,ii+1,i+m);
        // subtract row ii from all lower rows
        vvmulandsub(n, a, ii, ii+1, i+m, ii+1, n);
    }
}

// Base case of the triangular solve: forward-eliminate the m x m block b
// against the unit lower triangle of a, with plain loops.
static void solve_triangle_for_unit_mat_internal(int n, double a[][RDIM],
                                                 int nb, double b[][nb], int m)
{
    int ii,j,k;
    for(ii=0;ii<m;ii++)
        for(j=ii+1;j<m;j++)
            for (k=0;k<m;k++)
                b[j][k] -= a[j][ii]*b[ii][k];
}

void solve_triangle_for_unit_mat(int n, double a[n][RDIM], int nb,
                                 double b[nb][nb], int m, int i);
static void solve_triangle_for_unit_mat_recursive(int n, double a[][RDIM],
                                                  int nb, double b[][nb], int m);

// NOTE(review): this _0 variant appears to be an earlier version of
// solve_triangle_for_unit_mat_recursive below (it lacks the final
// copy-back of bwork) and seems to be unused — confirm before removing.
static void solve_triangle_for_unit_mat_recursive_0(int n, double a[][RDIM],
                                                    int nb, double b[][nb], int m)
{
    int i,ii,j,k;
    if (m < 16){
        solve_triangle_for_unit_mat_internal(n, a, nb, b,m);
        return;
    }
    const int mhalf = m/2;
    solve_triangle_for_unit_mat_recursive(n, a, nb, b,mhalf);
    mydgemm( mhalf, mhalf, mhalf, -1.0, &(a[mhalf][0]), RDIM,
             &(b[0][0]), nb, 1.0, &(b[mhalf][0]),nb );
    double bwork[mhalf][mhalf];
    double bwork2[mhalf][mhalf];
    for (j=0;j<mhalf;j++)
        for (k=0;k<mhalf;k++)bwork[j][k]=0.0;
    for (j=0;j<mhalf;j++)bwork[j][j]=1.0;
    solve_triangle_for_unit_mat_recursive(n, (double(*)[])(&a[mhalf][mhalf]),
                                          mhalf, bwork,mhalf);
    for(i=0;i<mhalf;i++)
        for(j=0;j<mhalf;j++)
            bwork2[i][j]=b[i+mhalf][j];
    mydgemm(mhalf, mhalf, mhalf, 1.0, (double*)bwork,mhalf,
            (double*)bwork2, mhalf, 0.0, &(b[mhalf][0]),nb );
    solve_triangle_for_unit_mat_recursive(n, (double(*)[])(&a[mhalf][mhalf]), nb,
                                          (double(*)[])(&b[mhalf][mhalf]), mhalf);
}

// Recursive triangular solve: split the m x m problem in half, solve the
// top half, update the bottom half with dgemm, solve the bottom half
// against an identity workspace, and copy the result back into b.
static void solve_triangle_for_unit_mat_recursive(int n, double a[][RDIM],
                                                  int nb, double b[][nb], int m)
{
    int i,ii,j,k;
    if (m < 16){
        // apparently, too deep recursion here
        // causes large error....
        // might need some fix
        solve_triangle_for_unit_mat_internal(n, a, nb, b,m);
        return;
    }
    const int mhalf = m/2;
    solve_triangle_for_unit_mat_recursive(n, a, nb, b,mhalf);
    mydgemm( mhalf, mhalf, mhalf, -1.0, &(a[mhalf][0]), RDIM,
             &(b[0][0]), nb, 1.0, &(b[mhalf][0]),nb );
    double bwork[mhalf][mhalf];
    double bwork2[mhalf][mhalf];
    for (j=0;j<mhalf;j++)
        for (k=0;k<mhalf;k++)bwork[j][k]=0.0;
    for (j=0;j<mhalf;j++)bwork[j][j]=1.0;
    solve_triangle_for_unit_mat_recursive(n, (double(*)[])(&a[mhalf][mhalf]),
                                          mhalf, bwork,mhalf);
    for(i=0;i<mhalf;i++)
        for(j=0;j<mhalf;j++)
            bwork2[i][j]=b[i+mhalf][j];
    mydgemm(mhalf, mhalf, mhalf, 1.0, (double*)bwork,mhalf,
            (double*)bwork2, mhalf, 0.0, &(b[mhalf][0]),nb );
    // copy the (lower-triangular) bottom-right solution back into b
    for (j=0;j<mhalf;j++)
        for (k=0;k<j+1;k++)b[mhalf+j][mhalf+k]=bwork[j][k];
}

// Compute b = inverse of the unit lower triangle of a[i..i+m, i..i+m]
// applied to the identity, via the recursive solver above.
void solve_triangle_for_unit_mat(int n, double a[n][RDIM], int nb,
                                 double b[nb][nb], int m, int i)
{
    int ii,j,k;
    BEGIN_TSC;
    for (j=0;j<nb;j++)
        for (k=0;k<nb;k++)b[j][k]=0.0;
    for (j=0;j<nb;j++)b[j][j]=1.0;
    solve_triangle_for_unit_mat_recursive(n, (double(*)[]) (&a[i][i]), nb, b, m);
    END_TSC(t,8);
}

// Apply the inverted diagonal block to the panel to its right:
// a[i:i+m, i+m:iend] = b * a[i:i+m, i+m:iend], staged through awork.
void solve_triangle(int n, double a[n][RDIM], int m,
                    double awork[][n], int i, int iend)
{
    int ii,j,k;
    // current =ii
    // c0=i+m
    // c1=iend
    // r0=ii+1
    // r1 = i+m
    double b[m][m];
    double work[m];
    solve_triangle_for_unit_mat(n,a,m,b,m,i);
    BEGIN_TSC;
    for(j=i;j<i+m;j++){
        for (k=i+m;k<iend;k++){
            awork[j-i][k-i-m]=a[j][k];
#ifdef NOBLAS
            a[j][k]=0;
#endif
        }
    }
#ifndef NOBLAS
    mydgemm(m, iend-i-m, m, 1.0, &(b[0][0]), m,
            &(awork[0][0]), n, 0.0, &(a[i][i+m]), RDIM );
#else
    for (k=i+m;k<iend;k++){
        for(j=0;j<m;j++)
            for(ii=0;ii<j+1;ii++)
                a[j+i][k]+= b[j][ii]*awork[ii][k-i-m];
    }
#endif
    END_TSC(t,9);
}

// Apply the panel's pivoting/scaling/updates to the columns right of the
// panel ([i+m, iend)), parallelizing the row swaps over nt column strips
// when the trailing region is wide enough.
void process_right_part(int n, double a[n][RDIM], int m,
                        double awork[][n], int pv[], int i, int iend)
{
    int ii;
    // exchange rows
    if ((iend-i-m) > m){
        //    if(0){
        int k;
        int nt=4;
#ifdef TIMETEST
        BEGIN_TSC;
#endif
#pragma omp parallel for private(k,ii)
        for(k=0;k<nt;k++){
            int di = (16+iend-i-m)/nt;
            int istart = i+m+di*k;
            int iend2 = istart
+ di; if (iend2> iend) iend2 = iend; // fprintf(stderr," swaprows %d %d %d %d\n",istart,iend2,i+m,iend); for(ii=i;ii<i+m;ii++){ // swaprows_simple(n,a,pv[ii-i],ii,istart,iend2); // scalerow(n,a,1.0/a[ii][ii] ,ii,istart,iend2); swaprows_simple_with_scale(n,a,pv[ii-i],ii,istart,iend2, 1.0/a[ii][ii] ); } } // normalize rows #ifdef TIMETEST END_TSC(t,0); #endif #pragma omp parallel for private(ii) for(ii=i;ii<i+m;ii++){ // scalerow(n,a,1.0/a[ii][ii] ,ii,i+m,iend); } }else{ for(ii=i;ii<i+m;ii++){ swaprows(n,a,pv[ii-i],ii,i+m,iend); scalerow(n,a,1.0/a[ii][ii] ,ii,i+m,iend); } // normalize rows for(ii=i;ii<i+m;ii++){ } } // subtract rows (within i-i+m-1) solve_triangle(n,a,m,awork, i,iend); // for(ii=i;ii<i+m;ii++){ // vvmulandsub(n, a, ii, i+m, iend, ii+1, i+m); // } // subtract rows i-i+m-1 from all lower rows mmmulandsub(n, a, i,i+m, i+m, iend, i+m, n); } void transpose_rowtocol8(int n, double a[][RDIM], double at[][n], int istart) { int i,j,k; const int m=8; double atmp[m][m] __attribute__((align(128))); #pragma omp parallel for private(i,j,k,atmp) for(i=istart;i<n;i+=m){ for(k=0;k<m;k++){ double *ak = a[i+k]; atmp[0][k] =ak[0]; atmp[1][k] =ak[1]; atmp[2][k] =ak[2]; atmp[3][k] =ak[3]; atmp[4][k] =ak[4]; atmp[5][k] =ak[5]; atmp[6][k] =ak[6]; atmp[7][k] =ak[7]; } for(j=0;j<m;j++){ v2df * atp = (v2df*) atmp[j]; v2df * ap = (v2df*) (at[j]+i); *(ap)=*(atp); *(ap+1)=*(atp+1); *(ap+2)=*(atp+2); *(ap+3)=*(atp+3); } } } void transpose_rowtocol16_0(int n, double a[][RDIM], double at[][n], int istart) { int i,j,k; const int m=16; const int m4=16; double atmp[m][m4]; int mend; #pragma omp parallel for private(i,j,k,atmp) for(i=istart;i<n;i+=m4){ mend = m4; if (mend+i > n) mend = n-i; for(k=0;k<mend;k++){ double *ak = a[i+k]; // __builtin_prefetch(a+i+k+m,0,0); atmp[0][k] =ak[0]; atmp[1][k] =ak[1]; atmp[2][k] =ak[2]; atmp[3][k] =ak[3]; atmp[4][k] =ak[4]; atmp[5][k] =ak[5]; atmp[6][k] =ak[6]; atmp[7][k] =ak[7]; atmp[8][k] =ak[8]; atmp[9][k] =ak[9]; atmp[10][k] =ak[10]; atmp[11][k] 
=ak[11]; atmp[12][k] =ak[12]; atmp[13][k] =ak[13]; atmp[14][k] =ak[14]; atmp[15][k] =ak[15]; } for(j=0;j<mend;j++){ v2df * atp = (v2df*) atmp[j]; v2df * ap = (v2df*) (at[j]+i); *(ap)=*(atp); *(ap+1)=*(atp+1); *(ap+2)=*(atp+2); *(ap+3)=*(atp+3); *(ap+4)=*(atp+4); *(ap+5)=*(atp+5); *(ap+6)=*(atp+6); *(ap+7)=*(atp+7); } } } void transpose_rowtocol16_1(int n, double a[][RDIM], double at[][n], int istart) { int i,j,k; const int m=16; double atmp[m][m]; int mend; //#pragma omp parallel for private(i,j,k,atmp) for(i=istart;i<n;i+=m){ for(k=0;k<m;k++){ v2df * ak = (v2df*) a[i+k]; v2df * akk = (v2df*) atmp[k]; akk[0] =ak[0]; akk[1] =ak[1]; akk[2] =ak[2]; akk[3] =ak[3]; akk[4] =ak[4]; akk[5] =ak[5]; akk[6] =ak[6]; akk[7] =ak[7]; } for(j=0;j<m;j++){ v2df * atk= (v2df*)(at[j]+i); atk[0]=(v2df){atmp[0][j],atmp[1][j]}; atk[1]=(v2df){atmp[2][j],atmp[3][j]}; atk[2]=(v2df){atmp[4][j],atmp[5][j]}; atk[3]=(v2df){atmp[6][j],atmp[7][j]}; atk[4]=(v2df){atmp[8][j],atmp[9][j]}; atk[5]=(v2df){atmp[10][j],atmp[11][j]}; atk[6]=(v2df){atmp[12][j],atmp[13][j]}; atk[7]=(v2df){atmp[14][j],atmp[15][j]}; } } } void transpose_rowtocol16(int n, double a[][RDIM], double at[][n], int istart) { int i,j,k; const int m=16; int mend; #pragma omp parallel for private(i,j,k) for(i=istart;i<n;i+=m){ double atmp[m][m]; // BEGIN_TSC; for(k=0;k<m;k++){ v2df * ak = (v2df*) a[i+k]; v2df * akk = (v2df*) atmp[k]; asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory"); asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory"); // __builtin_prefetch(a[i+k+m*2],0,0); // __builtin_prefetch(a[i+k+m*2]+8,0,0); akk[0] =ak[0]; akk[1] =ak[1]; akk[2] =ak[2]; akk[3] =ak[3]; akk[4] =ak[4]; akk[5] =ak[5]; akk[6] =ak[6]; akk[7] =ak[7]; } // END_TSC(t,17); // { // BEGIN_TSC; for(j=0;j<m;j++){ v2df * atk= (v2df*)(at[j]+i); atk[0]=(v2df){atmp[0][j],atmp[1][j]}; atk[1]=(v2df){atmp[2][j],atmp[3][j]}; atk[2]=(v2df){atmp[4][j],atmp[5][j]}; atk[3]=(v2df){atmp[6][j],atmp[7][j]}; atk[4]=(v2df){atmp[8][j],atmp[9][j]}; 
atk[5]=(v2df){atmp[10][j],atmp[11][j]}; atk[6]=(v2df){atmp[12][j],atmp[13][j]}; atk[7]=(v2df){atmp[14][j],atmp[15][j]}; } // END_TSC(t2,18); // } int istart) } } void transpose_rowtocol16_3(int n, double a[][RDIM], double at[][n], int istart) { int i,j,k; const int m=16; double atmp[m][m]; double atmp2[m][m]; int mend; // BEGIN_TSC; //#pragma omp parallel for private(i,j,k,atmp) for(i=istart;i<n;i+=m){ for(k=0;k<m;k++){ v2df * ak = (v2df*) a[i+k]; v2df * akk = (v2df*) atmp[k]; asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory"); asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory"); // __builtin_prefetch(a[i+k+m*2],0,0); // __builtin_prefetch(a[i+k+m*2]+8,0,0); akk[0] =ak[0]; akk[1] =ak[1]; akk[2] =ak[2]; akk[3] =ak[3]; akk[4] =ak[4]; akk[5] =ak[5]; akk[6] =ak[6]; akk[7] =ak[7]; } { for(j=0;j<m;j++){ v2df * atk= (v2df*)(atmp2[j]); atk[0]=(v2df){atmp[0][j],atmp[1][j]}; atk[1]=(v2df){atmp[2][j],atmp[3][j]}; atk[2]=(v2df){atmp[4][j],atmp[5][j]}; atk[3]=(v2df){atmp[6][j],atmp[7][j]}; atk[4]=(v2df){atmp[8][j],atmp[9][j]}; atk[5]=(v2df){atmp[10][j],atmp[11][j]}; atk[6]=(v2df){atmp[12][j],atmp[13][j]}; atk[7]=(v2df){atmp[14][j],atmp[15][j]}; } } { for(j=0;j<m;j++){ v2df * atk= (v2df*)(at[j]+i); v2df * attk= (v2df*)(atmp2[j]); atk[0]=attk[0]; atk[1]=attk[1]; atk[2]=attk[2]; atk[3]=attk[3]; atk[4]=attk[4]; atk[5]=attk[5]; atk[6]=attk[6]; atk[7]=attk[7]; } } } // END_TSC(t,2); } void transpose_rowtocol16_4(int n, double a[][RDIM], double at[][n], int istart) { int i,j,k; const int m=16; const int mh=8; v2df atmp[m][mh]; double atmp2[m][m]; int mend; // BEGIN_TSC; //#pragma omp parallel for private(i,j,k,atmp) for(i=istart;i<n;i+=m){ for(k=0;k<m;k++){ v2df * ak = (v2df*) a[i+k]; v2df * akk = atmp[k]; asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory"); asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory"); // __builtin_prefetch(a[i+k+m*2],0,0); // __builtin_prefetch(a[i+k+m*2]+8,0,0); akk[0] =ak[0]; akk[1] =ak[1]; akk[2] =ak[2]; akk[3] =ak[3]; akk[4] =ak[4]; akk[5] =ak[5]; akk[6] 
=ak[6];
            akk[7] =ak[7];
        }
        {
            for(j=0;j<m;j++){
                v2df * atk= (v2df*)(atmp2[j]);
                atk[0]=(v2df){atmp[0][j],atmp[1][j]};
                atk[1]=(v2df){atmp[2][j],atmp[3][j]};
                atk[2]=(v2df){atmp[4][j],atmp[5][j]};
                atk[3]=(v2df){atmp[6][j],atmp[7][j]};
                atk[4]=(v2df){atmp[8][j],atmp[9][j]};
                atk[5]=(v2df){atmp[10][j],atmp[11][j]};
                atk[6]=(v2df){atmp[12][j],atmp[13][j]};
                atk[7]=(v2df){atmp[14][j],atmp[15][j]};
            }
        }
        {
            for(j=0;j<m;j++){
                v2df * atk= (v2df*)(at[j]+i);
                v2df * attk= (v2df*)(atmp2[j]);
                atk[0]=attk[0];
                atk[1]=attk[1];
                atk[2]=attk[2];
                atk[3]=attk[3];
                atk[4]=attk[4];
                atk[5]=attk[5];
                atk[6]=attk[6];
                atk[7]=attk[7];
            }
        }
    }
    //    END_TSC(t,2);
}

// 16x16 row-to-column transpose using SSE2 shufpd to build the transposed
// tile two output rows at a time (low halves then high halves), staged
// through atmp2 before the final contiguous stores.
void transpose_rowtocol16_4(int n, double a[][RDIM], double at[][n],
                            int istart)
{
    int i,j,k;
    const int m=16;
    const int mh=8;
    v2df atmp[m][mh];
    double atmp2[m][m];
    int mend;
    //    BEGIN_TSC;
    //#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
        for(k=0;k<m;k++){
            v2df * ak = (v2df*) a[i+k];
            v2df * akk = atmp[k];
            asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory");
            asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory");
            //	    __builtin_prefetch(a[i+k+m*2],0,0);
            //	    __builtin_prefetch(a[i+k+m*2]+8,0,0);
            akk[0] =ak[0];
            akk[1] =ak[1];
            akk[2] =ak[2];
            akk[3] =ak[3];
            akk[4] =ak[4];
            akk[5] =ak[5];
            akk[6] =ak[6];
            akk[7] =ak[7];
        }
        {
            for(j=0;j<m;j+=2){
                v2df * atk= (v2df*)(atmp2[j]);
                int jh = j>>1;
                //		atk[0]=__builtin_ia32_shufpd(atmp[0][jh],
                //					     atmp[1][jh],0x00);
                *(__m128d *)atk = _mm_shuffle_pd (*(__m128d *)(atmp[0]+jh),
                                                  *(__m128d *)(atmp[1]+jh), 0x00);
                atk[1]=__builtin_ia32_shufpd(atmp[2][jh], atmp[3][jh],0x00);
                atk[2]=__builtin_ia32_shufpd(atmp[4][jh], atmp[5][jh],0x00);
                atk[3]=__builtin_ia32_shufpd(atmp[6][jh], atmp[7][jh],0x00);
                atk[4]=__builtin_ia32_shufpd(atmp[8][jh], atmp[9][jh],0x00);
                atk[5]=__builtin_ia32_shufpd(atmp[10][jh], atmp[11][jh],0x00);
                atk[6]=__builtin_ia32_shufpd(atmp[12][jh], atmp[13][jh],0x00);
                atk[7]=__builtin_ia32_shufpd(atmp[14][jh], atmp[15][jh],0x00);
                atk= (v2df*)(atmp2[j+1]);
                atk[0]=__builtin_ia32_shufpd(atmp[0][jh], atmp[1][jh],0xff);
                atk[1]=__builtin_ia32_shufpd(atmp[2][jh], atmp[3][jh],0xff);
                atk[2]=__builtin_ia32_shufpd(atmp[4][jh], atmp[5][jh],0xff);
                atk[3]=__builtin_ia32_shufpd(atmp[6][jh], atmp[7][jh],0xff);
                atk[4]=__builtin_ia32_shufpd(atmp[8][jh], atmp[9][jh],0xff);
                atk[5]=__builtin_ia32_shufpd(atmp[10][jh], atmp[11][jh],0xff);
                atk[6]=__builtin_ia32_shufpd(atmp[12][jh], atmp[13][jh],0xff);
                atk[7]=__builtin_ia32_shufpd(atmp[14][jh], atmp[15][jh],0xff);
            }
        }
        {
            for(j=0;j<m;j++){
                v2df * atk= (v2df*)(at[j]+i);
                v2df * attk= (v2df*)(atmp2[j]);
                atk[0]=attk[0];
                atk[1]=attk[1];
                atk[2]=attk[2];
                atk[3]=attk[3];
                atk[4]=attk[4];
                atk[5]=attk[5];
                atk[6]=attk[6];
                atk[7]=attk[7];
            }
        }
    }
    //    END_TSC(t,2);
}

// Dispatcher: pick the specialized 8- or 16-wide kernel, else fall back
// to a generic m x m tile transpose.
void transpose_rowtocol(int n, double a[][RDIM],int m, double at[][n],
                        int istart)
{
    int i,j,k;
    double atmp[m][m];
    BEGIN_TSC;
    if (m == 8){
        transpose_rowtocol8(n,a,at,istart);
        END_TSC(t,2);
        return;
    }
    if (m == 16){
        transpose_rowtocol16(n,a,at,istart);
        END_TSC(t,2);
        return;
    }
    for(i=istart;i<n;i+=m){
        for(k=0;k<m;k++){
            for(j=0;j<m;j++){
                atmp[j][k] =a[i+k][j];
            }
        }
        for(j=0;j<m;j++){
            for(k=0;k<m;k++){
                at[j][i+k]=atmp[j][k];
            }
        }
    }
    END_TSC(t,2);
}

// Inverse of transpose_rowtocol8: copy the column-major panel at back
// into rows [istart, n) of a, 8x8 tiles at a time.
void transpose_coltorow8(int n, double a[][RDIM], double at[][n],
                         int istart)
{
    int i,j,k;
    const int m=8;
    double atmp[m][m];
#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
        for(j=0;j<m;j++){
            double * atj = at[j]+i;
            //	    __builtin_prefetch(at[j]+i+m+m,0,0);
            //  inserting prefetch here causes speed down...
            atmp[0][j] =atj[0];
            atmp[1][j] =atj[1];
            atmp[2][j] =atj[2];
            atmp[3][j] =atj[3];
            atmp[4][j] =atj[4];
            atmp[5][j] =atj[5];
            atmp[6][j] =atj[6];
            atmp[7][j] =atj[7];
        }
        for(k=0;k<m;k++){
            v2df * atp = (v2df*) atmp[k];
            v2df * ap = (v2df*) a[i+k];
            *(ap)=*(atp);
            *(ap+1)=*(atp+1);
            *(ap+2)=*(atp+2);
            *(ap+3)=*(atp+3);
        }
    }
}

// Inverse of transpose_rowtocol16, with prefetcht2 hints three tiles ahead.
void transpose_coltorow16(int n, double a[][RDIM], double at[][n],
                          int istart)
{
    int i,j,k;
    const int m=16;
#pragma omp parallel for private(i,j,k)
    for(i=istart;i<n;i+=m){
        double atmp[m][m];
        for(k=0;k<m;k++){
            v2df * ak = (v2df*) (at[k]+i);
            v2df * akk = (v2df*) atmp[k];
            //	    asm("prefetchnta %0"::"m"(at[k][i+m*3]):"memory");
            //	    asm("prefetchnta %0"::"m"(at[k][i+m*3+8]):"memory");
            asm("prefetcht2 %0"::"m"(at[k][i+m*3]):"memory");
            asm("prefetcht2 %0"::"m"(at[k][i+m*3+8]):"memory");
            akk[0] =ak[0];
            akk[1] =ak[1];
            akk[2] =ak[2];
            akk[3] =ak[3];
            akk[4] =ak[4];
            akk[5] =ak[5];
            akk[6] =ak[6];
            akk[7] =ak[7];
        }
        for(j=0;j<m;j++){
            v2df * atk= (v2df*)(a[i+j]);
            atk[0]=(v2df){atmp[0][j],atmp[1][j]};
            atk[1]=(v2df){atmp[2][j],atmp[3][j]};
            atk[2]=(v2df){atmp[4][j],atmp[5][j]};
            atk[3]=(v2df){atmp[6][j],atmp[7][j]};
            atk[4]=(v2df){atmp[8][j],atmp[9][j]};
            atk[5]=(v2df){atmp[10][j],atmp[11][j]};
            atk[6]=(v2df){atmp[12][j],atmp[13][j]};
            atk[7]=(v2df){atmp[14][j],atmp[15][j]};
        }
    }
}

// Scalar-gather variant of the 16-wide column-to-row transpose.
void transpose_coltorow16_0(int n, double a[][RDIM], double at[][n],
                            int istart)
{
    int i,j,k;
    const int m=16;
    double atmp[m][m];
#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
        for(j=0;j<m;j++){
            double * atj = at[j]+i;
            //	    __builtin_prefetch(at[j]+i+m+m,0,0);
            //  inserting prefetch here causes speed down...
            atmp[0][j] =atj[0];
            atmp[1][j] =atj[1];
            atmp[2][j] =atj[2];
            atmp[3][j] =atj[3];
            atmp[4][j] =atj[4];
            atmp[5][j] =atj[5];
            atmp[6][j] =atj[6];
            atmp[7][j] =atj[7];
            atmp[8][j] =atj[8];
            atmp[9][j] =atj[9];
            atmp[10][j] =atj[10];
            atmp[11][j] =atj[11];
            atmp[12][j] =atj[12];
            atmp[13][j] =atj[13];
            atmp[14][j] =atj[14];
            atmp[15][j] =atj[15];
        }
        for(k=0;k<m;k++){
            v2df * atp = (v2df*) atmp[k];
            v2df * ap = (v2df*) a[i+k];
            *(ap)=*(atp);
            *(ap+1)=*(atp+1);
            *(ap+2)=*(atp+2);
            *(ap+3)=*(atp+3);
            *(ap+4)=*(atp+4);
            *(ap+5)=*(atp+5);
            *(ap+6)=*(atp+6);
            *(ap+7)=*(atp+7);
        }
    }
}

// Dispatcher for the column-to-row direction, mirroring transpose_rowtocol.
void transpose_coltorow(int n, double a[][RDIM],int m, double at[][n],
                        int istart)
{
    int i,j,k;
    double atmp[m][m];
    BEGIN_TSC;
    if (m == 8){
        transpose_coltorow8(n,a,at,istart);
        END_TSC(t,3);
        return;
    }
    if (m == 16){
        transpose_coltorow16(n,a,at,istart);
        END_TSC(t,3);
        return;
    }
    for(i=istart;i<n;i+=m){
        for(j=0;j<m;j++){
            double * atj = at[j]+i;
            for(k=0;k<m;k+=4){
                atmp[k][j] =atj[k];
                atmp[k+1][j] =atj[k+1];
                atmp[k+2][j] =atj[k+2];
                atmp[k+3][j] =atj[k+3];
            }
        }
        for(k=0;k<m;k++){
            double * aik = a[i+k];
            for(j=0;j<m;j+=4){
                aik[j] = atmp[k][j];
                aik[j+1] = atmp[k][j+1];
                aik[j+2] = atmp[k][j+2];
                aik[j+3] = atmp[k][j+3];
            }
        }
    }
    END_TSC(t,3);
}

// Factor the panel in column-major order: transpose it into awork,
// run the column-major recursive decomposition, and transpose back.
void column_decomposition_with_transpose(int n, double a[n][RDIM], int m,
                                         double awork[][n], int pv[], int i)
{
    int k,j;
    transpose_rowtocol(n, (double(*)[]) (&a[0][i]),m, awork,i);
    //	fprintf(stderr,"call cm column recursive %d %d\n", i, m);
    cm_column_decomposition_recursive( n, awork,m,pv,i);
    //	fprintf(stderr,"return cm column recursive %d %d\n", i, m);
    transpose_coltorow(n, (double(*)[]) (&a[0][i]),m, awork,i);
}

// Recursive panel factorization: direct (transposed) decomposition for
// small panels, recursion on halves otherwise.
// (continues past the end of this chunk)
void column_decomposition_recursive(int n, double a[n][RDIM], int m,
                                    double awork[][n], int pv[], int i)
{
    int j, k;
    int ip,ii;
    double ainv;
    //	 fprintf(stderr,"column recursive %d %d\n", i, m);
    if (m <= 16){
        // perform non-recursive direct decomposition
        BEGIN_TSC;
        column_decomposition_with_transpose(n, a, m,awork, pv,i);
        END_TSC(t,20);
    }else{
        // process the left half by recursion
column_decomposition_recursive(n, a, m/2, awork, pv,i); // process the right half process_right_part(n,a,m/2,awork, pv,i,i+m); column_decomposition_recursive(n, a, m/2, awork, pv+m/2,i+m/2); // process the swap of rows for the left half for(ii=i+m/2;ii<i+m;ii++){ swaprows(n,a,pv[ii-i],ii,i,i+m/2); } // normalize rows for(ii=i+m/2;ii<i+m;ii++){ scalerow(n,a,1.0/a[ii][ii] ,ii,i,i+m/2); } } } void lumcolumn( int n, double a[n][RDIM], double b[], int m, double awork[][n],int pv[], int recursive) { int i; nswap=0; for(i=0;i<n;i+=m){ BEGIN_TSC; // fprintf(stderr,"lumcolumn i=%d\n", i); if (recursive){ column_decomposition_recursive(n, a, m, awork, pv,i); }else{ column_decomposition(n, a, m, pv,i); } // fprintf(stderr,"lumcolumn column end\n"); process_right_part(n,a,m,awork, pv,i,n+1); // fprintf(stderr,"lumcolumn right end\n"); END_TSC(t,19); } backward_sub(n,a,b); } typedef struct parmstruct{ int n; int seed; int nb; int boardid; int nboards; int usehugepage; } PARMS, *PPARMS; void usage() { fprintf(stderr,"lu2 options:\n"); fprintf(stderr," -h: This help\n"); fprintf(stderr," -s: seed (default=1)\n"); fprintf(stderr," -n: size of matrix (default=8192)\n"); fprintf(stderr," -b: block size (default=2048)\n"); fprintf(stderr," -B: board id (default=0)\n"); fprintf(stderr," -N: number of boards (default=1)\n"); fprintf(stderr," -g: usehugetlbfs (default=no)\n"); } extern char *optarg; extern int optind; void print_parms(FILE* stream, PPARMS parms) { fprintf(stream,"N=%d Seed=%d NB=%d usehuge=%d\n", parms->n,parms->seed,parms->nb, parms->usehugepage); fprintf(stream,"Board id=%d # boards=%d\n", parms->boardid, parms->nboards); } void read_parms(int argc, char * argv[], PPARMS parms) { int ch; static struct option longopts[] = { { "help", no_argument, 0, 'h' }, { "block_size", optional_argument, NULL, 'b' }, { "board_id", optional_argument, NULL, 'B' }, { "nboards", optional_argument, NULL, 'N' }, { "seed", optional_argument, NULL, 's' }, { "ndim_matrix", required_argument, 
NULL, 'n' }, { "usehugepage", no_argument, 0, 'g' }, { NULL, 0, NULL, 0 } }; parms->seed=1; parms->n=8192; parms->nb = 2048; parms->boardid = 0; parms->nboards=1; parms->usehugepage = 0; while((ch=getopt_long(argc,argv,"B:N:b:ghn:s:",longopts, NULL))!= -1){ fprintf(stderr,"optchar = %c optarg=%s\n", ch,optarg); switch (ch) { case 'b': parms->nb = atoi(optarg); break; case 'B': parms->boardid = atoi(optarg); break; case 'N': parms->nboards = atoi(optarg); break; case 'g': parms->usehugepage = 1; break; case 's': parms->seed = atoi(optarg); break; case 'n': parms->n = atoi(optarg); break; case 'h': usage(); exit(1); case '?':usage(); exit(1); break; default:break; } } argc -= optind; argv += optind; print_parms(stderr, parms); print_parms(stdout, parms); } int main(int argc, char * argv[]) { int n, seed, nb, boardid; PARMS parms; int i; fprintf(stderr,"main top omp_max_threads=%d procs=%d\n", omp_get_max_threads(),omp_get_num_procs()); read_parms(argc, argv, &parms); n = parms.n; nb = parms.nb; seed = parms.seed; boardid = parms.boardid; gdrsetboardid(parms.boardid); gdrsetnboards(parms.nboards); #if 0 fprintf(stderr, "Enter n, seed, nb:"); scanf("%d%d%d", &n, &seed, &nb); printf("N=%d Seed=%d NB=%d\n", n,seed,nb); #endif double (*a)[]; double (*acopy)[]; double (*awork)[]; int * pv; double *b, *bcopy; long int nl=n; if (parms.usehugepage){ char fname[128]; sprintf(fname,"/mnt/huge/aaa-%d",boardid); int fd = open(fname, O_RDWR|O_CREAT, 0777); size_t size = ((long)(sizeof(double)*((long)nl)* (long)(RDIM))+0x400000)&0xffffffffffc00000L; a = (double(*)[]) mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); size_t worksize = ((sizeof(double)*nb*n)+0x400000)&0xffc00000; off_t offset = (off_t) size; awork = (double(*)[]) mmap(0, worksize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset); // printf("a, awork offset size= %lx %lx %lx %lx %lx\n", // (long) (a), (long) (awork), // (long) (awork)-(long) (a), (long) offset, (long)(size)); // printf("size of size_t and off_t 
long= %d %d %d\n", sizeof(size_t), // sizeof(off_t), sizeof(long)); }else{ a = (double(*)[]) malloc(sizeof(double)*n*(RDIM)); awork = (double(*)[]) malloc(sizeof(double)*nb*n); } b = (double*)malloc(sizeof(double)*n); bcopy = (double*)malloc(sizeof(double)*n); pv = (int*)malloc(sizeof(int)*n); reset_gdr(RDIM, a, nb, awork, n); if (seed == 0){ readmat(n,a); }else{ randomsetmat(n,seed,a); } fprintf(stderr,"read/set mat end\n"); // copymats(n,a,acopy); // copybvect(n,a,bcopy); fprintf(stderr,"copy mat end\n"); // printmat(n,a,b); // lu2columnv2(n,a,b); // lu2columnv2(n,a,b); // lub(n,a,b,NBK); // printmat(n,a,b); // showresult(n,acopy, b, bcopy); // copymats(n,acopy,bcopy,a, b); // lu(n,a,b); timer_init(); init_timer(); fprintf(stderr,"before lumcolumn omp_max_threads=%d procs=%d\n", omp_get_max_threads(),omp_get_num_procs()); lumcolumn(n,a,b,nb,awork,pv,1); // lu(n,a,b); double ctime=cpusec(); double wtime=wsec(); if (seed == 0){ readmat(n,a); }else{ randomsetmat(n,seed,a); } showresult(n,a, b); double nd = n; double speed = nd*nd*nd*2.0/3.0/wtime/1e9; printf("Nswap=%d cpsec = %g wsec=%g %g Gflops\n", nswap, ctime, wtime, speed); print_timers((double)n, (double)nb ); return 0; }
/* ==================== gimplify.c ==================== */
/* * Copyright (C) 2007. QLogic Corporation. All Rights Reserved. */ /* Tree lowering pass. This pass converts the GENERIC functions-as-trees tree representation into the GIMPLE form. Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. Major work done by Sebastian Pop <s.pop@laposte.net>, Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "varray.h" #include "tree-gimple.h" #include "tree-inline.h" #include "diagnostic.h" #include "langhooks.h" #include "langhooks-def.h" #include "tree-flow.h" #include "cgraph.h" #include "timevar.h" #include "except.h" #include "hashtab.h" #include "flags.h" #include "real.h" #include "function.h" #include "output.h" #include "expr.h" #include "ggc.h" #include "toplev.h" #include "target.h" #include "optabs.h" #include "pointer-set.h" #ifdef KEY #include "gspin-gcc-interface.h" #endif enum gimplify_omp_var_data { GOVD_SEEN = 1, GOVD_EXPLICIT = 2, GOVD_SHARED = 4, GOVD_PRIVATE = 8, GOVD_FIRSTPRIVATE = 16, GOVD_LASTPRIVATE = 32, GOVD_REDUCTION = 64, GOVD_LOCAL = 128, GOVD_DEBUG_PRIVATE = 256, GOVD_PRIVATE_OUTER_REF = 512, GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL) }; enum omp_region_type { ORT_WORKSHARE = 0, ORT_TASK = 1, ORT_PARALLEL = 2, ORT_COMBINED_PARALLEL = 3 }; struct gimplify_omp_ctx { struct gimplify_omp_ctx *outer_context; splay_tree variables; struct pointer_set_t *privatized_types; location_t location; enum omp_clause_default_kind default_kind; enum omp_region_type region_type; }; struct gimplify_ctx { struct gimplify_ctx *prev_context; tree current_bind_expr; tree temps; tree conditional_cleanups; tree exit_label; tree return_temp; VEC(tree,heap) *case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; }; static struct gimplify_ctx *gimplify_ctxp; static struct gimplify_omp_ctx *gimplify_omp_ctxp; /* Formal (expression) temporary table handling: Multiple occurrences of the same scalar expression are evaluated into the same temporary. */ typedef struct gimple_temp_hash_elt { tree val; /* Key */ tree temp; /* Value */ } elt_t; /* Forward declarations. 
*/ static enum gimplify_status gimplify_compound_expr (tree *, tree *, bool); #ifdef ENABLE_CHECKING static bool cpt_same_type (tree a, tree b); #endif /* Return a hash value for a formal temporary table entry. */ static hashval_t gimple_tree_hash (const void *p) { tree t = ((const elt_t *) p)->val; return iterative_hash_expr (t, 0); } /* Compare two formal temporary table entries. */ static int gimple_tree_eq (const void *p1, const void *p2) { tree t1 = ((const elt_t *) p1)->val; tree t2 = ((const elt_t *) p2)->val; enum tree_code code = TREE_CODE (t1); if (TREE_CODE (t2) != code || TREE_TYPE (t1) != TREE_TYPE (t2)) return 0; if (!operand_equal_p (t1, t2, 0)) return 0; /* Only allow them to compare equal if they also hash equal; otherwise results are nondeterminate, and we fail bootstrap comparison. */ gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2)); return 1; } /* Set up a context for the gimplifier. */ void push_gimplify_context (void) { struct gimplify_ctx *c; c = (struct gimplify_ctx *) xcalloc (1, sizeof (struct gimplify_ctx)); c->prev_context = gimplify_ctxp; if (optimize) c->temp_htab = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free); gimplify_ctxp = c; } /* Tear down a context for the gimplifier. If BODY is non-null, then put the temporaries into the outer BIND_EXPR. Otherwise, put them in the unexpanded_var_list. 
*/ void pop_gimplify_context (tree body) { struct gimplify_ctx *c = gimplify_ctxp; tree t; gcc_assert (c && !c->current_bind_expr); gimplify_ctxp = c->prev_context; for (t = c->temps; t ; t = TREE_CHAIN (t)) DECL_GIMPLE_FORMAL_TEMP_P (t) = 0; if (body) declare_vars (c->temps, body, false); else record_vars (c->temps); if (optimize) htab_delete (c->temp_htab); free (c); } static void gimple_push_bind_expr (tree bind) { TREE_CHAIN (bind) = gimplify_ctxp->current_bind_expr; gimplify_ctxp->current_bind_expr = bind; } static void gimple_pop_bind_expr (void) { gimplify_ctxp->current_bind_expr = TREE_CHAIN (gimplify_ctxp->current_bind_expr); } tree gimple_current_bind_expr (void) { return gimplify_ctxp->current_bind_expr; } /* Returns true iff there is a COND_EXPR between us and the innermost CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */ static bool gimple_conditional_context (void) { return gimplify_ctxp->conditions > 0; } /* Note that we've entered a COND_EXPR. */ static void gimple_push_condition (void) { #ifdef ENABLE_CHECKING if (gimplify_ctxp->conditions == 0) gcc_assert (!gimplify_ctxp->conditional_cleanups); #endif ++(gimplify_ctxp->conditions); } /* Note that we've left a COND_EXPR. If we're back at unconditional scope now, add any conditional cleanups we've seen to the prequeue. */ static void gimple_pop_condition (tree *pre_p) { int conds = --(gimplify_ctxp->conditions); gcc_assert (conds >= 0); if (conds == 0) { append_to_statement_list (gimplify_ctxp->conditional_cleanups, pre_p); gimplify_ctxp->conditional_cleanups = NULL_TREE; } } /* A stable comparison routine for use with splay trees and DECLs. */ static int splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb) { tree a = (tree) xa; tree b = (tree) xb; return DECL_UID (a) - DECL_UID (b); } /* Create a new omp construct that deals with variable remapping. 
*/ static struct gimplify_omp_ctx * new_omp_context (enum omp_region_type region_type) { struct gimplify_omp_ctx *c; c = XCNEW (struct gimplify_omp_ctx); c->outer_context = gimplify_omp_ctxp; c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0); c->privatized_types = pointer_set_create (); c->location = input_location; c->region_type = region_type; if (region_type != ORT_TASK) c->default_kind = OMP_CLAUSE_DEFAULT_SHARED; else c->default_kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; return c; } /* Destroy an omp construct that deals with variable remapping. */ static void delete_omp_context (struct gimplify_omp_ctx *c) { splay_tree_delete (c->variables); pointer_set_destroy (c->privatized_types); XDELETE (c); } static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int); static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool); /* A subroutine of append_to_statement_list{,_force}. T is not NULL. */ static void append_to_statement_list_1 (tree t, tree *list_p) { tree list = *list_p; tree_stmt_iterator i; if (!list) { if (t && TREE_CODE (t) == STATEMENT_LIST) { *list_p = t; return; } *list_p = list = alloc_stmt_list (); } i = tsi_last (list); tsi_link_after (&i, t, TSI_CONTINUE_LINKING); } /* Add T to the end of the list container pointed to by LIST_P. If T is an expression with no effects, it is ignored. */ void append_to_statement_list (tree t, tree *list_p) { if (t && TREE_SIDE_EFFECTS (t)) append_to_statement_list_1 (t, list_p); } /* Similar, but the statement is always added, regardless of side effects. */ void append_to_statement_list_force (tree t, tree *list_p) { if (t != NULL_TREE) append_to_statement_list_1 (t, list_p); } /* Both gimplify the statement T and append it to LIST_P. */ void gimplify_and_add (tree t, tree *list_p) { gimplify_stmt (&t); append_to_statement_list (t, list_p); } /* Strip off a legitimate source ending from the input string NAME of length LEN. 
Rather than having to know the names used by all of our front ends, we strip off an ending of a period followed by up to five characters. (Java uses ".class".) */ static inline void remove_suffix (char *name, int len) { int i; for (i = 2; i < 8 && len > i; i++) { if (name[len - i] == '.') { name[len - i] = '\0'; break; } } } /* Create a nameless artificial label and put it in the current function context. Returns the newly created label. */ tree create_artificial_label (void) { tree lab = build_decl (LABEL_DECL, NULL_TREE, void_type_node); DECL_ARTIFICIAL (lab) = 1; DECL_IGNORED_P (lab) = 1; DECL_CONTEXT (lab) = current_function_decl; return lab; } /* Subroutine for find_single_pointer_decl. */ static tree find_single_pointer_decl_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data) { tree *pdecl = (tree *) data; if (DECL_P (*tp) && POINTER_TYPE_P (TREE_TYPE (*tp))) { if (*pdecl) { /* We already found a pointer decl; return anything other than NULL_TREE to unwind from walk_tree signalling that we have a duplicate. */ return *tp; } *pdecl = *tp; } return NULL_TREE; } /* Find the single DECL of pointer type in the tree T and return it. If there are zero or more than one such DECLs, return NULL. */ static tree find_single_pointer_decl (tree t) { tree decl = NULL_TREE; if (walk_tree (&t, find_single_pointer_decl_1, &decl, NULL)) { /* find_single_pointer_decl_1 returns a nonzero value, causing walk_tree to return a nonzero value, to indicate that it found more than one pointer DECL. */ return NULL_TREE; } return decl; } /* Create a new temporary name with PREFIX. Returns an identifier. */ static GTY(()) unsigned int tmp_var_id_num; tree create_tmp_var_name (const char *prefix) { char *tmp_name; if (prefix) { char *preftmp = ASTRDUP (prefix); remove_suffix (preftmp, strlen (preftmp)); prefix = preftmp; } ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? 
prefix : "T", tmp_var_id_num++); return get_identifier (tmp_name); } /* Create a new temporary variable declaration of type TYPE. Does NOT push it into the current binding. */ tree create_tmp_var_raw (tree type, const char *prefix) { tree tmp_var; tree new_type; /* Make the type of the variable writable. */ new_type = build_type_variant (type, 0, 0); TYPE_ATTRIBUTES (new_type) = TYPE_ATTRIBUTES (type); tmp_var = build_decl (VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL, type); /* The variable was declared by the compiler. */ DECL_ARTIFICIAL (tmp_var) = 1; /* And we don't want debug info for it. */ DECL_IGNORED_P (tmp_var) = 1; /* Make the variable writable. */ TREE_READONLY (tmp_var) = 0; DECL_EXTERNAL (tmp_var) = 0; TREE_STATIC (tmp_var) = 0; TREE_USED (tmp_var) = 1; return tmp_var; } /* Create a new temporary variable declaration of type TYPE. DOES push the variable into the current binding. Further, assume that this is called only from gimplification or optimization, at which point the creation of certain types are bugs. */ tree create_tmp_var (tree type, const char *prefix) { tree tmp_var; /* We don't allow types that are addressable (meaning we can't make copies), or incomplete. We also used to reject every variable size objects here, but now support those for which a constant upper bound can be obtained. The processing for variable sizes is performed in gimple_add_tmp_var, point at which it really matters and possibly reached via paths not going through this function, e.g. after direct calls to create_tmp_var_raw. */ gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type)); tmp_var = create_tmp_var_raw (type, prefix); gimple_add_tmp_var (tmp_var); return tmp_var; } /* Given a tree, try to return a useful variable name that we can use to prefix a temporary that is being assigned the value of the tree. I.E. given <temp> = &A, return A. 
*/ const char * get_name (tree t) { tree stripped_decl; stripped_decl = t; STRIP_NOPS (stripped_decl); if (DECL_P (stripped_decl) && DECL_NAME (stripped_decl)) return IDENTIFIER_POINTER (DECL_NAME (stripped_decl)); else { switch (TREE_CODE (stripped_decl)) { case ADDR_EXPR: return get_name (TREE_OPERAND (stripped_decl, 0)); break; default: return NULL; } } } /* Create a temporary with a name derived from VAL. Subroutine of lookup_tmp_var; nobody else should call this function. */ static inline tree create_tmp_from_val (tree val) { return create_tmp_var (TYPE_MAIN_VARIANT (TREE_TYPE (val)), get_name (val)); } /* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse an existing expression temporary. */ static tree lookup_tmp_var (tree val, bool is_formal) { tree ret; /* If not optimizing, never really reuse a temporary. local-alloc won't allocate any variable that is used in more than one basic block, which means it will go into memory, causing much extra work in reload and final and poorer code generation, outweighing the extra memory allocation here. */ if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val)) ret = create_tmp_from_val (val); else { elt_t elt, *elt_p; void **slot; elt.val = val; slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT); if (*slot == NULL) { elt_p = XNEW (elt_t); elt_p->val = val; elt_p->temp = ret = create_tmp_from_val (val); *slot = (void *) elt_p; } else { elt_p = (elt_t *) *slot; ret = elt_p->temp; } } if (is_formal) DECL_GIMPLE_FORMAL_TEMP_P (ret) = 1; return ret; } /* Returns a formal temporary variable initialized with VAL. PRE_P is as in gimplify_expr. Only use this function if: 1) The value of the unfactored expression represented by VAL will not change between the initialization and use of the temporary, and 2) The temporary will not be otherwise modified. For instance, #1 means that this is inappropriate for SAVE_EXPR temps, and #2 means it is inappropriate for && temps. 
For other cases, use get_initialized_tmp_var instead. */ static tree internal_get_tmp_var (tree val, tree *pre_p, tree *post_p, bool is_formal) { tree t, mod; gimplify_expr (&val, pre_p, post_p, is_gimple_formal_tmp_rhs, fb_rvalue); t = lookup_tmp_var (val, is_formal); if (is_formal) { tree u = find_single_pointer_decl (val); if (u && TREE_CODE (u) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (u)) u = DECL_GET_RESTRICT_BASE (u); if (u && TYPE_RESTRICT (TREE_TYPE (u))) { if (DECL_BASED_ON_RESTRICT_P (t)) gcc_assert (u == DECL_GET_RESTRICT_BASE (t)); else { DECL_BASED_ON_RESTRICT_P (t) = 1; SET_DECL_RESTRICT_BASE (t, u); } } } if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE) DECL_COMPLEX_GIMPLE_REG_P (t) = 1; mod = build2 (INIT_EXPR, TREE_TYPE (t), t, val); if (EXPR_HAS_LOCATION (val)) SET_EXPR_LOCUS (mod, EXPR_LOCUS (val)); else SET_EXPR_LOCATION (mod, input_location); /* gimplify_modify_expr might want to reduce this further. */ gimplify_and_add (mod, pre_p); /* If we're gimplifying into ssa, gimplify_modify_expr will have given our temporary an ssa name. Find and return it. */ if (gimplify_ctxp->into_ssa) t = TREE_OPERAND (mod, 0); return t; } /* Returns a formal temporary variable initialized with VAL. PRE_P points to a statement list where side-effects needed to compute VAL should be stored. */ tree get_formal_tmp_var (tree val, tree *pre_p) { return internal_get_tmp_var (val, pre_p, NULL, true); } /* Returns a temporary variable initialized with VAL. PRE_P and POST_P are as in gimplify_expr. */ tree get_initialized_tmp_var (tree val, tree *pre_p, tree *post_p) { return internal_get_tmp_var (val, pre_p, post_p, false); } /* Declares all the variables in VARS in SCOPE. If DEBUG_INFO is true, generate debug info for them; otherwise don't. */ void declare_vars (tree vars, tree scope, bool debug_info) { tree last = vars; if (last) { tree temps, block; /* C99 mode puts the default 'return 0;' for main outside the outer braces. So drill down until we find an actual scope. 
*/ while (TREE_CODE (scope) == COMPOUND_EXPR) scope = TREE_OPERAND (scope, 0); gcc_assert (TREE_CODE (scope) == BIND_EXPR); temps = nreverse (last); block = BIND_EXPR_BLOCK (scope); if (!block || !debug_info) { TREE_CHAIN (last) = BIND_EXPR_VARS (scope); BIND_EXPR_VARS (scope) = temps; } else { /* We need to attach the nodes both to the BIND_EXPR and to its associated BLOCK for debugging purposes. The key point here is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR is a subchain of the BIND_EXPR_VARS of the BIND_EXPR. */ if (BLOCK_VARS (block)) BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps); else { BIND_EXPR_VARS (scope) = chainon (BIND_EXPR_VARS (scope), temps); BLOCK_VARS (block) = temps; } } } } /* For VAR a VAR_DECL of variable size, try to find a constant upper bound for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly. Abort if no such upper bound can be obtained. */ static void force_constant_size (tree var) { /* The only attempt we make is by querying the maximum size of objects of the variable's type. */ HOST_WIDE_INT max_size; gcc_assert (TREE_CODE (var) == VAR_DECL); max_size = max_int_size_in_bytes (TREE_TYPE (var)); gcc_assert (max_size >= 0); DECL_SIZE_UNIT (var) = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size); DECL_SIZE (var) = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT); } void gimple_add_tmp_var (tree tmp) { gcc_assert (!TREE_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp)); /* Later processing assumes that the object size is constant, which might not be true at this point. Force the use of a constant upper bound in this case. */ if (!host_integerp (DECL_SIZE_UNIT (tmp), 1)) force_constant_size (tmp); DECL_CONTEXT (tmp) = current_function_decl; DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1; if (gimplify_ctxp) { TREE_CHAIN (tmp) = gimplify_ctxp->temps; gimplify_ctxp->temps = tmp; /* Mark temporaries local within the nearest enclosing parallel. 
*/ if (gimplify_omp_ctxp) { struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; while (ctx && ctx->region_type == ORT_WORKSHARE) ctx = ctx->outer_context; if (ctx) omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN); } } else if (cfun) record_vars (tmp); else declare_vars (tmp, DECL_SAVED_TREE (current_function_decl), false); } /* Determines whether to assign a locus to the statement STMT. */ static bool should_carry_locus_p (tree stmt) { /* Don't emit a line note for a label. We particularly don't want to emit one for the break label, since it doesn't actually correspond to the beginning of the loop/switch. */ if (TREE_CODE (stmt) == LABEL_EXPR) return false; /* Do not annotate empty statements, since it confuses gcov. */ if (!TREE_SIDE_EFFECTS (stmt)) return false; return true; } static void annotate_one_with_locus (tree t, location_t locus) { if (EXPR_P (t) && ! EXPR_HAS_LOCATION (t) && should_carry_locus_p (t)) SET_EXPR_LOCATION (t, locus); } void annotate_all_with_locus (tree *stmt_p, location_t locus) { tree_stmt_iterator i; if (!*stmt_p) return; for (i = tsi_start (*stmt_p); !tsi_end_p (i); tsi_next (&i)) { tree t = tsi_stmt (i); /* Assuming we've already been gimplified, we shouldn't see nested chaining constructs anymore. */ gcc_assert (TREE_CODE (t) != STATEMENT_LIST && TREE_CODE (t) != COMPOUND_EXPR); annotate_one_with_locus (t, locus); } } /* Similar to copy_tree_r() but do not copy SAVE_EXPR or TARGET_EXPR nodes. These nodes model computations that should only be done once. If we were to unshare something like SAVE_EXPR(i++), the gimplification process would create wrong code. */ static tree mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data) { enum tree_code code = TREE_CODE (*tp); /* Don't unshare types, decls, constants and SAVE_EXPR nodes. 
*/ if (TREE_CODE_CLASS (code) == tcc_type || TREE_CODE_CLASS (code) == tcc_declaration || TREE_CODE_CLASS (code) == tcc_constant || code == SAVE_EXPR || code == TARGET_EXPR /* We can't do anything sensible with a BLOCK used as an expression, but we also can't just die when we see it because of non-expression uses. So just avert our eyes and cross our fingers. Silly Java. */ || code == BLOCK) *walk_subtrees = 0; else { gcc_assert (code != BIND_EXPR); copy_tree_r (tp, walk_subtrees, data); } return NULL_TREE; } /* Callback for walk_tree to unshare most of the shared trees rooted at *TP. If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1), then *TP is deep copied by calling copy_tree_r. This unshares the same trees as copy_tree_r with the exception of SAVE_EXPR nodes. These nodes model computations that should only be done once. If we were to unshare something like SAVE_EXPR(i++), the gimplification process would create wrong code. */ static tree copy_if_shared_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { tree t = *tp; enum tree_code code = TREE_CODE (t); /* Skip types, decls, and constants. But we do want to look at their types and the bounds of types. Mark them as visited so we properly unmark their subtrees on the unmark pass. If we've already seen them, don't look down further. */ if (TREE_CODE_CLASS (code) == tcc_type || TREE_CODE_CLASS (code) == tcc_declaration || TREE_CODE_CLASS (code) == tcc_constant) { if (TREE_VISITED (t)) *walk_subtrees = 0; else TREE_VISITED (t) = 1; } /* If this node has been visited already, unshare it and don't look any deeper. */ else if (TREE_VISITED (t)) { walk_tree (tp, mostly_copy_tree_r, NULL, NULL); *walk_subtrees = 0; } /* Otherwise, mark the tree as visited and keep looking. 
*/ else TREE_VISITED (t) = 1; return NULL_TREE; } static tree unmark_visited_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { if (TREE_VISITED (*tp)) TREE_VISITED (*tp) = 0; else *walk_subtrees = 0; return NULL_TREE; } /* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and the bodies of any nested functions if we are unsharing the entire body of FNDECL. */ static void unshare_body (tree *body_p, tree fndecl) { struct cgraph_node *cgn = cgraph_node (fndecl); walk_tree (body_p, copy_if_shared_r, NULL, NULL); if (body_p == &DECL_SAVED_TREE (fndecl)) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl); } /* Likewise, but mark all trees as not visited. */ static void unvisit_body (tree *body_p, tree fndecl) { struct cgraph_node *cgn = cgraph_node (fndecl); walk_tree (body_p, unmark_visited_r, NULL, NULL); if (body_p == &DECL_SAVED_TREE (fndecl)) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unvisit_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl); } /* Unshare T and all the trees reached from T via TREE_CHAIN. */ static void unshare_all_trees (tree t) { walk_tree (&t, copy_if_shared_r, NULL, NULL); walk_tree (&t, unmark_visited_r, NULL, NULL); } /* Unconditionally make an unshared copy of EXPR. This is used when using stored expressions which span multiple functions, such as BINFO_VTABLE, as the normal unsharing process can't tell that they're shared. */ tree unshare_expr (tree expr) { walk_tree (&expr, mostly_copy_tree_r, NULL, NULL); return expr; } /* A terser interface for building a representation of an exception specification. */ tree gimple_build_eh_filter (tree body, tree allowed, tree failure) { tree t; /* FIXME should the allowed types go in TREE_TYPE? 
   */
  t = build2 (EH_FILTER_EXPR, void_type_node, allowed, NULL_TREE);
  append_to_statement_list (failure, &EH_FILTER_FAILURE (t));

  t = build2 (TRY_CATCH_EXPR, void_type_node, NULL_TREE, t);
  append_to_statement_list (body, &TREE_OPERAND (t, 0));

  return t;
}

/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Returns the temporary, or NULL_TREE if
   WRAPPER was already void.  */

tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    default:
	      goto out;
	    }
	}

    out:
      /* At this point P is NULL (empty STATEMENT_LIST) or points to the
	 innermost expression that is not itself a wrapper.  */
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.
   */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}

/* Prepare calls to builtins to SAVE and RESTORE the stack as well as a
   temporary through which they communicate.  */

static void
build_stack_save_restore (tree *save, tree *restore)
{
  tree save_call, tmp_var;

  save_call =
    build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_SAVE],
			      NULL_TREE);
  tmp_var = create_tmp_var (ptr_type_node, "saved_stack");

  *save = build2 (MODIFY_EXPR, ptr_type_node, tmp_var, save_call);
  *restore =
    build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_RESTORE],
			      tree_cons (NULL_TREE, tmp_var, NULL_TREE));
}

/* Gimplify a BIND_EXPR.  Just voidify and recurse.  */

static enum gimplify_status
gimplify_bind_expr (tree *expr_p, tree *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;

	  /* Mark variable as local.  */
	  if (ctx && !is_global_var (t)
	      && (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL))
	    omp_add_variable (gimplify_omp_ctxp, t, GOVD_LOCAL | GOVD_SEEN);

	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;
	}

      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.
   */
      if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	  && !TREE_THIS_VOLATILE (t)
	  && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_COMPLEX_GIMPLE_REG_P (t) = 1;
    }

  /* Enter this BIND_EXPR's scope; save_stack is reset so we can detect
     whether the body itself needs a stack save/restore pair.  */
  gimple_push_bind_expr (bind_expr);
  gimplify_ctxp->save_stack = false;

  gimplify_to_stmt_list (&BIND_EXPR_BODY (bind_expr));

  if (gimplify_ctxp->save_stack)
    {
      tree stack_save, stack_restore;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  Note that mudflap depends on the
	 format of the emitted code: see mx_register_decls().  */
      build_stack_save_restore (&stack_save, &stack_restore);

      t = build2 (TRY_FINALLY_EXPR, void_type_node,
		  BIND_EXPR_BODY (bind_expr), NULL_TREE);
      append_to_statement_list (stack_restore, &TREE_OPERAND (t, 1));

      BIND_EXPR_BODY (bind_expr) = NULL_TREE;
      append_to_statement_list (stack_save, &BIND_EXPR_BODY (bind_expr));
      append_to_statement_list (t, &BIND_EXPR_BODY (bind_expr));
    }

  /* Restore the enclosing context's save_stack flag.  */
  gimplify_ctxp->save_stack = old_save_stack;
  gimple_pop_bind_expr ();

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (bind_expr, pre_p);
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}

/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the list where side effects that must happen before
   STMT should be stored.  */

static enum gimplify_status
gimplify_return_expr (tree stmt, tree *pre_p)
{
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (!ret_expr || TREE_CODE (ret_expr) == RESULT_DECL
      || ret_expr == error_mark_node)
    return GS_ALL_DONE;

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      result_decl = TREE_OPERAND (ret_expr, 0);
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	/* See through a return by reference.
   */
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl
      || aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    result = result_decl;
  else if (gimplify_ctxp->return_temp)
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_var (TREE_TYPE (result_decl), NULL);

      /* ??? With complex control flow (usually involving abnormal
	 edges), we can wind up warning about an uninitialized value
	 for this.  Due to how this variable is constructed and
	 initialized, this is never true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  /* If we didn't use a temporary, then the result is just the result_decl.
     Otherwise we need a simple copy.  This should already be gimple.  */
  if (result == result_decl)
    ret_expr = result;
  else
    ret_expr = build2 (MODIFY_EXPR, TREE_TYPE (result), result_decl, result);
  TREE_OPERAND (stmt, 0) = ret_expr;

  return GS_ALL_DONE;
}

/* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.  */

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), stmt_p);

  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);

      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  /* This is a variable-sized decl.  Simplify its size and mark it
	     for deferred expansion.  Note that mudflap depends on the format
	     of the emitted code: see mx_register_decls().  */
	  tree t, args, addr, ptr_type;

	  gimplify_one_sizepos (&DECL_SIZE (decl), stmt_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), stmt_p);

	  /* All occurrences of this decl in final gimplified code will be
	     replaced by indirection.  Setting DECL_VALUE_EXPR does two
	     things: First, it lets the rest of the gimplifier know what
	     replacement to use.  Second, it lets the debug info know
	     where to find the value.  */
	  ptr_type = build_pointer_type (TREE_TYPE (decl));
	  addr = create_tmp_var (ptr_type, get_name (decl));
	  DECL_IGNORED_P (addr) = 0;
	  t = build_fold_indirect_ref (addr);
	  SET_DECL_VALUE_EXPR (decl, t);
	  DECL_HAS_VALUE_EXPR_P (decl) = 1;

	  /* Allocate the object on the stack with alloca and store the
	     resulting pointer in ADDR.  */
	  args = tree_cons (NULL, DECL_SIZE_UNIT (decl), NULL);
	  t = built_in_decls[BUILT_IN_ALLOCA];
	  t = build_function_call_expr (t, args);
	  t = fold_convert (ptr_type, t);
	  t = build2 (MODIFY_EXPR, void_type_node, addr, t);

	  gimplify_and_add (t, stmt_p);

	  /* Indicate that we need to restore the stack level when the
	     enclosing BIND_EXPR is exited.  */
	  gimplify_ctxp->save_stack = true;
	}

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, stmt_p);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.
   */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);
    }

  return GS_ALL_DONE;
}

/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, tree *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
  tree jump_stmt = build_and_jump (&LABEL_EXPR_LABEL (start_label));

  append_to_statement_list (start_label, pre_p);

  /* An EXIT_EXPR in the body creates this label on demand; see
     gimplify_exit_expr.  */
  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  if (gimplify_ctxp->exit_label)
    {
      append_to_statement_list (jump_stmt, pre_p);
      *expr_p = build1 (LABEL_EXPR, void_type_node, gimplify_ctxp->exit_label);
    }
  else
    *expr_p = jump_stmt;

  gimplify_ctxp->exit_label = saved_label;

  return GS_ALL_DONE;
}

/* Compare two case labels.  Because the front end should already have
   made sure that case ranges do not overlap, it is enough to only compare
   the CASE_LOW values of each case label.  */

static int
compare_case_labels (const void *p1, const void *p2)
{
  tree case1 = *(tree *)p1;
  tree case2 = *(tree *)p2;

  return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}

/* Sort the case labels in LABEL_VEC in place in ascending order.  */

void
sort_case_labels (tree label_vec)
{
  size_t len = TREE_VEC_LENGTH (label_vec);
  tree default_case = TREE_VEC_ELT (label_vec, len - 1);

  if (CASE_LOW (default_case))
    {
      size_t i;

      /* The last label in the vector should be the default case
	 but it is not.
   */
      for (i = 0; i < len; ++i)
	{
	  tree t = TREE_VEC_ELT (label_vec, i);
	  if (!CASE_LOW (t))
	    {
	      default_case = t;
	      TREE_VEC_ELT (label_vec, i) = TREE_VEC_ELT (label_vec, len - 1);
	      TREE_VEC_ELT (label_vec, len - 1) = default_case;
	      break;
	    }
	}
    }

  qsort (&TREE_VEC_ELT (label_vec, 0), len - 1, sizeof (tree),
	 compare_case_labels);
}

/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, tree *pre_p)
{
  tree switch_expr = *expr_p;
  enum gimplify_status ret;

  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL,
		       is_gimple_val, fb_rvalue);

  if (SWITCH_BODY (switch_expr))
    {
      VEC(tree,heap) *labels, *saved_labels;
      tree label_vec, default_case = NULL_TREE;
      size_t i, len;

      /* If someone can be bothered to fill in the labels, they can be
	 bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));

      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);

      gimplify_to_stmt_list (&SWITCH_BODY (switch_expr));

      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      i = 0;
      while (i < VEC_length (tree, labels))
	{
	  tree elt = VEC_index (tree, labels, i);
	  tree low = CASE_LOW (elt);
	  bool remove_element = FALSE;

	  if (low)
	    {
	      /* Discard empty ranges.  */
	      tree high = CASE_HIGH (elt);
	      if (high && INT_CST_LT (high, low))
		remove_element = TRUE;
	    }
	  else
	    {
	      /* The default case must be the last label in the list.  */
	      gcc_assert (!default_case);
	      default_case = elt;
	      remove_element = TRUE;
	    }

	  if (remove_element)
	    VEC_ordered_remove (tree, labels, i);
	  else
	    i++;
	}
      /* LEN now counts only the retained (non-default, non-empty) case
	 labels.  */
      len = i;

      label_vec = make_tree_vec (len + 1);
      SWITCH_LABELS (*expr_p) = label_vec;
      append_to_statement_list (switch_expr, pre_p);

      if (! default_case)
	{
	  /* If the switch has no default label, add one, so that we jump
	     around the switch body.
   */
	  default_case = build3 (CASE_LABEL_EXPR, void_type_node, NULL_TREE,
				 NULL_TREE, create_artificial_label ());
	  append_to_statement_list (SWITCH_BODY (switch_expr), pre_p);
	  *expr_p = build1 (LABEL_EXPR, void_type_node,
			    CASE_LABEL (default_case));
	}
      else
	*expr_p = SWITCH_BODY (switch_expr);

      for (i = 0; i < len; ++i)
	TREE_VEC_ELT (label_vec, i) = VEC_index (tree, labels, i);
      TREE_VEC_ELT (label_vec, len) = default_case;

      VEC_free (tree, heap, labels);

      sort_case_labels (label_vec);

      SWITCH_BODY (switch_expr) = NULL;
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));

  return ret;
}

/* Gimplify a CASE_LABEL_EXPR.  Record EXPR in the innermost gimplify
   context that collects case labels and lower it to a plain
   LABEL_EXPR.  */

static enum gimplify_status
gimplify_case_label_expr (tree *expr_p)
{
  tree expr = *expr_p;
  struct gimplify_ctx *ctxp;

  /* Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification.  */
  for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
    if (ctxp->case_labels)
      break;

  VEC_safe_push (tree, heap, ctxp->case_labels, expr);
  *expr_p = build1 (LABEL_EXPR, void_type_node, CASE_LABEL (expr));
  return GS_ALL_DONE;
}

/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  */

tree
build_and_jump (tree *label_p)
{
  if (label_p == NULL)
    /* If there's nowhere to jump, just fall through.  */
    return NULL_TREE;

  if (*label_p == NULL_TREE)
    {
      tree label = create_artificial_label ();
      *label_p = label;
    }

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}

/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */

static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);
  tree expr;

  expr = build_and_jump (&gimplify_ctxp->exit_label);
  expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE);
  *expr_p = expr;

  return GS_OK;
}

/* A helper function to be called via walk_tree.
   Mark all labels under *TP as being forced.  To be called
   for DECL_INITIAL of static variables.  */

tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    FORCED_LABEL (*tp) = 1;

  return NULL_TREE;
}

/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.

   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */

static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;

  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);

  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));

  /* Nothing to do if the reference already has its canonical type.  */
  if (TREE_TYPE (expr) != type)
    {
      tree old_type = TREE_TYPE (expr);

      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;

      /* And wrap the whole thing inside a NOP_EXPR.  */
      expr = build1 (NOP_EXPR, old_type, expr);

      *expr_p = expr;
    }
}

/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.  */

static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree ctype = TREE_TYPE (expr);
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree atype = TREE_TYPE (addr_expr);
  tree dctype, datype, ddatype, otype, obj_expr;

  /* Both cast and addr_expr types should be pointers.  */
  if (!POINTER_TYPE_P (ctype) || !POINTER_TYPE_P (atype))
    return;

  /* The addr_expr type should be a pointer to an array.
   */
  datype = TREE_TYPE (atype);
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* Both cast and addr_expr types should address the same object type.  */
  dctype = TREE_TYPE (ctype);
  ddatype = TREE_TYPE (datype);
  if (!lang_hooks.types_compatible_p (ddatype, dctype))
    return;

  /* The addr_expr and the object type should match.  */
  obj_expr = TREE_OPERAND (addr_expr, 0);
  otype = TREE_TYPE (obj_expr);
  if (!lang_hooks.types_compatible_p (otype, datype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (dctype)
      || TREE_CODE (TYPE_SIZE_UNIT (dctype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast.
     The new ARRAY_REF indexes the array at its lower bound.  */
  *expr_p = build4 (ARRAY_REF, dctype, obj_expr,
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (dctype),
				size_int (TYPE_ALIGN_UNIT (dctype))));
  *expr_p = build1 (ADDR_EXPR, ctype, *expr_p);
}

/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  */

static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  gcc_assert (TREE_CODE (*expr_p) == NOP_EXPR
	      || TREE_CODE (*expr_p) == CONVERT_EXPR);

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (TREE_CODE (*expr_p) == NOP_EXPR || TREE_CODE (*expr_p) == CONVERT_EXPR)
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.
   */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  return GS_OK;
}

/* Gimplify a VAR_DECL or PARM_DECL.  Returns GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  */

static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (errorcount || sorrycount);
      return GS_ERROR;
    }

  /* When within an OpenMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;

  /* If the decl is an alias for another expression, substitute it now.
     Unshare the value expression so gimplifying this use does not modify
     the copy shared with other uses.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      *expr_p = unshare_expr (DECL_VALUE_EXPR (decl));
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node pointed to by EXPR_P.

      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.
   Also, this way we don't run into problems with union aliasing; gcc
   requires that for accesses through a union to alias, the union reference
   must be explicit, which was not always the case when we were splitting
   up array and member refs.

   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
     *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_compound_lval (tree *expr_p, tree *pre_p,
			tree *post_p, fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_OK, tret;
  int i;

  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);

  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref (*p);

      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;

      VEC_safe_push (tree, heap, stack, *p);
    }

  /* The loop above must have pushed at least one handled component.  */
  gcc_assert (VEC_length (tree, stack));

  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.

     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.

     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }

	  if (!TREE_OPERAND (t, 3))
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));

	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size = size_binop (EXACT_DIV_EXPR, elmt_size, factor);

	      if (!is_gimple_min_invariant (elmt_size))
		{
		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor
		= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);

	      /* Divide the offset by its alignment.  */
	      offset = size_binop (EXACT_DIV_EXPR, offset, factor);

	      if (!is_gimple_min_invariant (offset))
		{
		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
    }

  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);

  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.
	     Temporary fix for gcc.c-torture/execute/20040313-1.c.
	     Gimplify non-constant array indices into a temporary
	     variable.
	     FIXME - The real fix is to gimplify post-modify
	     expressions into a minimal gimple lvalue.  However, that
	     exposes bugs in alias analysis.  The alias analyzer does
	     not handle &PTR->FIELD very well.  Will fix after the
	     branch is merged into mainline (dnovillo 2004-05-03).  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_formal_tmp_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == BIT_FIELD_REF)
	{
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	}

      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));

      /* The innermost expression P may have originally had TREE_SIDE_EFFECTS
	 set which would have caused all the outer expressions in EXPR_P
	 leading to P to also have had TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }

  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback);
  ret = MIN (ret, tret);

  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
      ret = MIN (ret, GS_OK);
    }

  VEC_free (tree, heap, stack);

  return ret;
}

/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).
   PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
	in another expression.  */

static enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p,
			bool want_value)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1, post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;

  code = TREE_CODE (*expr_p);

  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);

  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;

  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;

  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;

  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);

  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  /* Build LVALUE = LHS <arith_code> RHS.  */
  t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs);
  t1 = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);

  if (postfix)
    {
      gimplify_and_add (t1, orig_post_p);
      append_to_statement_list (post, orig_post_p);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = t1;
      return GS_OK;
    }
}

/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR.  */

static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size;

  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR
      || type == error_mark_node)
    return;

  /* If the size isn't known or is a constant, we have nothing to do.  */
  size = TYPE_SIZE_UNIT (type);
  if (!size || TREE_CODE (size) == INTEGER_CST)
    return;

  /* Otherwise, make a WITH_SIZE_EXPR.  */
  size = unshare_expr (size);
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}

/* Subroutine of gimplify_call_expr:  Gimplify a single argument.  */

static enum gimplify_status
gimplify_arg (tree *expr_p, tree *pre_p)
{
  bool (*test) (tree);
  fallback_t fb;

  /* In general, we allow lvalues for function arguments to avoid
     extra overhead of copying large aggregates out of even larger
     aggregates into temporaries only to copy the temporaries to
     the argument list.  Make optimizers happy by pulling out to
     temporaries those types that fit in registers.  */
  if (is_gimple_reg_type (TREE_TYPE (*expr_p)))
    test = is_gimple_val, fb = fb_rvalue;
  else
    test = is_gimple_lvalue, fb = fb_either;

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* There is a sequence point before a function call.  Side effects in
     the argument list must occur before the actual call.
     So, when gimplifying arguments, force gimplify_expr to use an
     internal post queue which is then appended to the end of PRE_P.  */
  return gimplify_expr (expr_p, pre_p, NULL, test, fb);
}

/* Gimplify the CALL_EXPR node pointed to by EXPR_P.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.

   WANT_VALUE is true if the result of the call is desired.  */

static enum gimplify_status
gimplify_call_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree decl;
  tree arglist;
  enum gimplify_status ret;

  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);

  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);

  /* This may be a call to a builtin function.

     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.

     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  decl = get_callee_fndecl (*expr_p);
  if (decl && DECL_BUILT_IN (decl))
    {
      tree arglist = TREE_OPERAND (*expr_p, 1);
      tree new = fold_builtin (decl, arglist, !want_value);

      if (new && new != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new;
	  return GS_OK;
	}

      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (decl) == BUILT_IN_VA_START)
	{
	  if (!arglist || !TREE_CHAIN (arglist))
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }

#ifdef KEY
/* bug 10904: This call produces __builtin_va_start(ap,0,0), and "wgen -m32"
   cannot handle the 2nd constant arg.  This would be a problem for wgen only
   for some cases (constructors and destructors) in C++, for other cases, wgen
   gets the TREE before this lowering process.  */
	  if (!flag_spin_file)
#endif
	  if (fold_builtin_next_arg (TREE_CHAIN (arglist)))
	    {
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }
	  /* Avoid gimplifying the second argument to va_start, which needs
	     to be the plain PARM_DECL.  */
	  return gimplify_arg (&TREE_VALUE (TREE_OPERAND (*expr_p, 1)), pre_p);
	}
    }

  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);

  /* Gimplify arguments in the order they will be pushed; the list is
     reversed here and restored to source order afterwards.  */
  if (PUSH_ARGS_REVERSED)
    TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1));
  for (arglist = TREE_OPERAND (*expr_p, 1); arglist;
       arglist = TREE_CHAIN (arglist))
    {
      enum gimplify_status t;

      t = gimplify_arg (&TREE_VALUE (arglist), pre_p);

      if (t == GS_ERROR)
	ret = GS_ERROR;
    }
  if (PUSH_ARGS_REVERSED)
    TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1));

  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      decl = get_callee_fndecl (*expr_p);
      if (decl && DECL_BUILT_IN (decl))
	{
	  tree arglist = TREE_OPERAND (*expr_p, 1);
	  tree new = fold_builtin (decl, arglist, !want_value);

	  if (new && new != *expr_p)
	    {
	      /* There was a transformation of this call which computes the
		 same value, but in a more efficient way.  Return and try
		 again.  */
	      *expr_p = new;
	      return GS_OK;
	    }
	}
    }

  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR
      && (call_expr_flags (*expr_p) & (ECF_CONST | ECF_PURE)))
    TREE_SIDE_EFFECTS (*expr_p) = 0;

  return ret;
}

/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.

   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.

   This function is the tree equivalent of do_jump.

   shortcut_cond_r should only be called by shortcut_cond_expr.  */

static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;

  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.
     */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      /* Turn if (a && b) into

         if (a); else goto no;
         if (b) goto yes; else goto no;
         (no:) */
      if (false_label_p == NULL)
        false_label_p = &local_label;

      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p);
      append_to_statement_list (t, &expr);

      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
                           false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      /* Turn if (a || b) into

         if (a) goto yes;
         if (b) goto yes; else goto no;
         (yes:) */
      if (true_label_p == NULL)
        true_label_p = &local_label;

      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL);
      append_to_statement_list (t, &expr);

      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
                           false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR)
    {
      /* As long as we're messing with gotos, turn if (a ? b : c) into
         if (a)
           if (b) goto yes; else goto no;
         else
           if (c) goto yes; else goto no;  */
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
                     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
                                      false_label_p),
                     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
                                      false_label_p));
    }
  else
    {
      /* Base case: a simple predicate becomes a COND_EXPR whose arms
         are plain jumps to the requested labels.  */
      expr = build3 (COND_EXPR, void_type_node, pred,
                     build_and_jump (true_label_p),
                     build_and_jump (false_label_p));
    }

  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* Lower a COND_EXPR whose predicate uses TRUTH_ANDIF/ORIF into explicit
   gotos and labels, preserving short-circuit evaluation order.  */

static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);

  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn (a && b) into if (a) if (b).  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
        {
          TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
          then_ = shortcut_cond_expr (expr);
          then_se = then_ && TREE_SIDE_EFFECTS (then_);
          pred = TREE_OPERAND (pred, 0);
          expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
        }
    }
  if (!then_se)
    {
      /* If there is no 'then', turn
           if (a || b); else d
         into
           if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
        {
          TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
          else_ = shortcut_cond_expr (expr);
          else_se = else_ && TREE_SIDE_EFFECTS (else_);
          pred = TREE_OPERAND (pred, 0);
          expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
        }
    }

  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;

  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */

  true_label = false_label = end_label = NULL_TREE;

  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */
  if (then_
      && TREE_CODE (then_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
    {
      true_label = GOTO_DESTINATION (then_);
      then_ = NULL;
      then_se = false;
    }
  if (else_
      && TREE_CODE (else_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
    {
      false_label = GOTO_DESTINATION (else_);
      else_ = NULL;
      else_se = false;
    }

  /* If we aren't hijacking a label for the 'then' branch, it falls
     through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;

  /* The 'else' branch also needs a label if it contains interesting
     code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;

  /* If there was nothing else in our arms, just forward the
     label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p);

  /* If our last subexpression already has a terminal label, reuse it.
     */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */
    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      /* FALLTHRU */
    case EQ_EXPR:
    case NE_EXPR:
    case LE_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case GT_EXPR:
      /* These expressions always produce boolean results.  */
      TREE_TYPE (expr) = boolean_type_node;
      return expr;

    default:
      /* Other expressions that get here must have boolean values, but
         might need to be converted to the appropriate mode.  */
      return fold_convert (boolean_type_node, expr);
    }
}

/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
   into

     if (p)                     if (p)
       t1 = a;                    a;
     else            or         else
       t1 = b;                    b;
     t1;

   The second form is used when *EXPR_P is of type void.

   TARGET is the tree for T1 above.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  FALLBACK says whether an lvalue result is
   acceptable; that selects the pointer-temporary variant below.  */

static enum gimplify_status
gimplify_cond_expr (tree *expr_p, tree *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree tmp, tmp2, type;
  enum gimplify_status ret;

  type = TREE_TYPE (expr);

  /* If this COND_EXPR has a value, copy the values into a temporary
     within the arms.  */
  if (! VOID_TYPE_P (type))
    {
      tree result;

      if ((fallback & fb_lvalue) == 0)
        {
          result = tmp2 = tmp = create_tmp_var (TREE_TYPE (expr), "iftmp");
          ret = GS_ALL_DONE;
        }
      else
        {
          /* An lvalue is wanted: take the address of each non-void arm
             and dereference a pointer temporary instead.  */
          tree type = build_pointer_type (TREE_TYPE (expr));

          if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
            TREE_OPERAND (expr, 1) =
              build_fold_addr_expr (TREE_OPERAND (expr, 1));

          if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
            TREE_OPERAND (expr, 2) =
              build_fold_addr_expr (TREE_OPERAND (expr, 2));

          tmp2 = tmp = create_tmp_var (type, "iftmp");

          expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (expr, 0),
                         TREE_OPERAND (expr, 1), TREE_OPERAND (expr, 2));

          result = build_fold_indirect_ref (tmp);
          ret = GS_ALL_DONE;
        }

      /* Build the then clause, 't1 = a;'.  But don't build an assignment
         if this branch is void; in C++ it can be, if it's a throw.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
        TREE_OPERAND (expr, 1)
          = build2 (MODIFY_EXPR, void_type_node, tmp, TREE_OPERAND (expr, 1));

      /* Build the else clause, 't1 = b;'.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
        TREE_OPERAND (expr, 2)
          = build2 (MODIFY_EXPR, void_type_node, tmp2, TREE_OPERAND (expr, 2));

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_and_add (expr, pre_p);

      *expr_p = result;
      return ret;
    }

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
        {
          *expr_p = expr;

          /* We can't rely on gimplify_expr to re-gimplify the expanded
             form properly, as cleanups might cause the target labels
             to be wrapped in a TRY_FINALLY_EXPR.  To prevent that, we
             need to set up a conditional context.  */
          gimple_push_condition ();
          gimplify_stmt (expr_p);
          gimple_pop_condition (pre_p);

          return GS_ALL_DONE;
        }
    }

  /* Now do the normal gimplification.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
                       is_gimple_condexpr, fb_rvalue);

  gimple_push_condition ();

  gimplify_to_stmt_list (&TREE_OPERAND (expr, 1));
  gimplify_to_stmt_list (&TREE_OPERAND (expr, 2));
  recalculate_side_effects (expr);

  gimple_pop_condition (pre_p);

  if (ret == GS_ERROR)
    /* Propagate the error; EXPR is still stored back below.  */
    ;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1)))
    ret = GS_ALL_DONE;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 2)))
    /* Rewrite "if (a); else b" to "if (!a) b"  */
    {
      TREE_OPERAND (expr, 0) = invert_truthvalue (TREE_OPERAND (expr, 0));
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
                           is_gimple_condexpr, fb_rvalue);

      tmp = TREE_OPERAND (expr, 1);
      TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 2);
      TREE_OPERAND (expr, 2) = tmp;
    }
  else
    /* Both arms are empty; replace the COND_EXPR with its predicate.  */
    expr = TREE_OPERAND (expr, 0);

  *expr_p = expr;
  return ret;
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.  SIZE is the byte count; WANT_VALUE
   selects whether the call result is rewrapped so the whole
   expression still yields the destination object.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value)
{
  tree args, t, to, to_ptr, from;

  to = TREE_OPERAND (*expr_p, 0);
  from = TREE_OPERAND (*expr_p, 1);

  /* Build the argument list back-to-front: (to_ptr, from_ptr, size).  */
  args = tree_cons (NULL, size, NULL);

  t = build_fold_addr_expr (from);
  args = tree_cons (NULL, t, args);

  to_ptr = build_fold_addr_expr (to);
  args = tree_cons (NULL, to_ptr, args);
  t = implicit_built_in_decls[BUILT_IN_MEMCPY];
  t = build_function_call_expr (t, args);

  if (want_value)
    {
      /* memcpy returns the destination pointer; cast and dereference
         it so the expression's value is the stored object.  */
      t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t);
      t = build1 (INDIRECT_REF, TREE_TYPE (to), t);
    }

  *expr_p = t;
  return GS_OK;
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list.
   */

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value)
{
  tree args, t, to, to_ptr;

  to = TREE_OPERAND (*expr_p, 0);

  /* Build the argument list back-to-front: (to_ptr, 0, size).  */
  args = tree_cons (NULL, size, NULL);

  args = tree_cons (NULL, integer_zero_node, args);

  to_ptr = build_fold_addr_expr (to);
  args = tree_cons (NULL, to_ptr, args);
  t = implicit_built_in_decls[BUILT_IN_MEMSET];
  t = build_function_call_expr (t, args);

  if (want_value)
    {
      /* memset returns the destination pointer; cast and dereference
         it so the expression's value is the cleared object.  */
      t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t);
      t = build1 (INDIRECT_REF, TREE_TYPE (to), t);
    }

  *expr_p = t;
  return GS_OK;
}

/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Returns non-null if we detect a potential overlap.  */

struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  int lhs_alias_set;
};

static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *data
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree t = *tp;

  /* If we find the base object, obviously we have overlap.  */
  if (data->lhs_base_decl == t)
    return t;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.  The only bits of information we
     have to go on at this point are addressability and alias sets.  */
  if (TREE_CODE (t) == INDIRECT_REF
      && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
      && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
    return t;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like
     above.  */
  if (TREE_CODE (t) == CALL_EXPR)
    {
      tree type, fntype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));

      /* A pointer parameter whose pointed-to type may alias the lhs is
         treated as a potential overlap.  */
      for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
        if (POINTER_TYPE_P (TREE_VALUE (type))
            && (!data->lhs_base_decl
                || TREE_ADDRESSABLE (data->lhs_base_decl))
            && alias_sets_conflict_p (data->lhs_alias_set,
                                      get_alias_set
                                        (TREE_TYPE (TREE_VALUE (type)))))
          return t;
    }

  /* Don't walk into types or decls.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL;
}

/* A subroutine of gimplify_init_constructor.  Pre-evaluate *EXPR_P,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.  */

static void
gimplify_init_ctor_preeval (tree *expr_p, tree *pre_p, tree *post_p,
                            struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is invariant, then there's nothing to pre-evaluate.
     But ensure it doesn't have any side-effects since a SAVE_EXPR is
     invariant but has side effects and might contain a reference to
     the object we're initializing.  */
  if (TREE_INVARIANT (*expr_p) && !TREE_SIDE_EFFECTS (*expr_p))
    return;

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p);

      for (ix = 0; VEC_iterate (constructor_elt, v, ix, ce); ix++)
        gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);
      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the
     rhs of a MODIFY_EXPR.  Given that we know the lhs is an aggregate,
     we know the gimplifier will consider this a store to memory.
     Doing this gimplification now means that we won't have to deal
     with complicated language-specific trees, nor trees like SAVE_EXPR
     that can induce exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't
     overlap with the lhs, since "a = { .x=a }" doesn't make sense.
     This will always be true for all scalars, since is_gimple_mem_rhs
     insists on a temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it
     doesn't overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}

/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

      var = lower;
    loop_entry:
      object[var] = value;
      if (var == upper)
        goto loop_exit;
      var = var + 1;
      goto loop_entry;
    loop_exit:

   We increment var _after_ the loop exit check because we might
   otherwise fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this
   has already been taken care of for us, in
   gimplify_init_ctor_preeval().  */

static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
                                     tree *, bool);

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
                               tree value, tree array_elt_type,
                               tree *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label;
  tree var, var_type, cref;

  loop_entry_label = create_artificial_label ();
  loop_exit_label = create_artificial_label ();

  /* Create and initialize the index variable; it has the same type as
     the upper bound.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type, NULL);
  append_to_statement_list (build2 (MODIFY_EXPR, var_type, var, lower),
                            pre_p);

  /* Add the loop entry label.
     */
  append_to_statement_list (build1 (LABEL_EXPR,
                                    void_type_node, loop_entry_label),
                            pre_p);

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
                 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
                             pre_p, cleared);
  else
    append_to_statement_list (build2 (MODIFY_EXPR, TREE_TYPE (cref),
                                      cref, value),
                              pre_p);

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_and_add (build3 (COND_EXPR, void_type_node,
                            build2 (EQ_EXPR, boolean_type_node,
                                    var, upper),
                            build1 (GOTO_EXPR,
                                    void_type_node,
                                    loop_exit_label),
                            NULL_TREE),
                    pre_p);

  /* Otherwise, increment the index var...  */
  append_to_statement_list (build2 (MODIFY_EXPR, var_type, var,
                                    build2 (PLUS_EXPR, var_type, var,
                                            fold_convert (var_type,
                                                          integer_one_node))),
                            pre_p);

  /* ...and jump back to the loop entry.  */
  append_to_statement_list (build1 (GOTO_EXPR,
                                    void_type_node,
                                    loop_entry_label),
                            pre_p);

  /* Add the loop exit label.  */
  append_to_statement_list (build1 (LABEL_EXPR,
                                    void_type_node, loop_exit_label),
                            pre_p);
}

/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (tree fdecl)
{
  if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
      && integer_zerop (DECL_SIZE (fdecl)))
    return true;
  return false;
}

/* Return true if TYPE is zero sized.  */

static bool
zero_sized_type (tree type)
{
  if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type)
      && integer_zerop (TYPE_SIZE (type)))
    return true;
  return false;
}

/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.  CLEARED is true if the entire LHS object has been
   zeroed first.  */

static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
                         tree *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref, init;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
        continue;

      /* If the whole object was block-cleared, zero elements need no
         explicit store.  */
      if (cleared && initializer_zerop (value))
        continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
         so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This
         can happen with calls to functions returning a zero-sized
         type, which we shouldn't discard.  As a number of downstream
         passes don't expect sets of zero-sized fields, we rely on the
         gimplification of the MODIFY_EXPR we make below to drop the
         assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
        continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
         whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
        {
          tree lower = TREE_OPERAND (purpose, 0);
          tree upper = TREE_OPERAND (purpose, 1);

          /* If the lower bound is equal to upper, just treat it as if
             upper was the index.  */
          if (simple_cst_equal (lower, upper))
            purpose = upper;
          else
            {
              gimplify_init_ctor_eval_range (object, lower, upper, value,
                                             array_elt_type, pre_p, cleared);
              continue;
            }
        }

      if (array_elt_type)
        {
          cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
                         purpose, NULL_TREE, NULL_TREE);
        }
      else
        {
          gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
          cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
                         unshare_expr (object), purpose, NULL_TREE);
        }

      if (TREE_CODE (value) == CONSTRUCTOR
          && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
        gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
                                 pre_p, cleared);
      else
        {
          init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
          gimplify_and_add (init, pre_p);
        }
    }
}

/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have
   explicit initializers, so if not all elements are initialized we
   keep the original MODIFY_EXPR, we just remove all of the constructor
   elements.  */

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, tree *pre_p,
                           tree *post_p, bool want_value)
{
  tree object;
  tree ctor = TREE_OPERAND (*expr_p, 1);
  tree type = TREE_TYPE (ctor);
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  if (TREE_CODE (ctor) != CONSTRUCTOR)
    return GS_UNHANDLED;

  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
                       is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;
  object = TREE_OPERAND (*expr_p, 0);

  elts = CONSTRUCTOR_ELTS (ctor);

  ret = GS_ALL_DONE;
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
        struct gimplify_init_ctor_preeval_data preeval_data;
        HOST_WIDE_INT num_type_elements, num_ctor_elements;
        HOST_WIDE_INT num_nonzero_elements;
        bool cleared, valid_const_initializer;

        /* Aggregate types must lower constructors to initialization of
           individual elements.
           The exception is that a CONSTRUCTOR node with no elements
           indicates zero-initialization of the whole.  */
        if (VEC_empty (constructor_elt, elts))
          break;

        /* Fetch information about the constructor to direct later
           processing.  We might want to make static versions of it in
           various cases, and can only do so if it known to be a valid
           constant initializer.  */
        valid_const_initializer
          = categorize_ctor_elements (ctor, &num_nonzero_elements,
                                      &num_ctor_elements, &cleared);

        /* If a const aggregate variable is being initialized, then it
           should never be a lose to promote the variable to be
           static.  */
        if (valid_const_initializer
            && num_nonzero_elements > 1
            && TREE_READONLY (object)
            && TREE_CODE (object) == VAR_DECL)
          {
            DECL_INITIAL (object) = ctor;
            TREE_STATIC (object) = 1;
            if (!DECL_NAME (object))
              DECL_NAME (object) = create_tmp_var_name ("C");
            walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

            /* ??? C++ doesn't automatically append a .<number> to the
               assembler name, and even when it does, it looks a FE
               private data structures to figure out what that number
               should be, which are not set for this variable.  I
               suppose this is important for local statics for inline
               functions, which aren't "local" in the object file
               sense.  So in order to get a unique TU-local symbol, we
               must invoke the lhd version now.  */
            lhd_set_decl_assembler_name (object);

            *expr_p = NULL_TREE;
            break;
          }

        /* If there are "lots" of initialized elements, even discounting
           those that are not address constants (and thus *must* be
           computed at runtime), then partition the constructor into
           constant and non-constant parts.  Block copy the constant
           parts in, then generate code for the non-constant parts.  */
        /* TODO.  There's code in cp/typeck.c to do this.  */

        num_type_elements = count_type_elements (type, true);

        /* If count_type_elements could not determine number of type
           elements for a constant-sized object, assume clearing is
           needed.  Don't do this for variable-sized objects, as
           store_constructor will ignore the clearing of variable-sized
           objects.  */
        if (num_type_elements < 0 && int_size_in_bytes (type) >= 0)
          cleared = true;
        /* If there are "lots" of zeros, then block clear the object
           first.  */
        else if (num_type_elements - num_nonzero_elements > CLEAR_RATIO
                 && num_nonzero_elements < num_type_elements/4)
          cleared = true;
        /* ??? This bit ought not be needed.  For any element not
           present in the initializer, we should simply set them to
           zero.  Except we'd need to *find* the elements that are not
           present, and that requires trickery to avoid quadratic
           compile-time behavior in large cases or excessive memory use
           in small cases.  */
        else if (num_ctor_elements < num_type_elements)
          cleared = true;

        /* If there are "lots" of initialized elements, and all of them
           are valid address constants, then the entire initializer can
           be dropped to memory, and then memcpy'd out.  Don't do this
           for sparse arrays, though, as it's more efficient to follow
           the standard CONSTRUCTOR behavior of memset followed by
           individual element initialization.  */
        if (valid_const_initializer && !cleared)
          {
            HOST_WIDE_INT size = int_size_in_bytes (type);
            unsigned int align;

            /* ??? We can still get unbounded array types, at least
               from the C++ front end.  This seems wrong, but attempt
               to work around it for now.  */
            if (size < 0)
              {
                size = int_size_in_bytes (TREE_TYPE (object));
                if (size >= 0)
                  TREE_TYPE (ctor) = type = TREE_TYPE (object);
              }

            /* Find the maximum alignment we can assume for the
               object.  */
            /* ??? Make use of DECL_OFFSET_ALIGN.  */
            if (DECL_P (object))
              align = DECL_ALIGN (object);
            else
              align = TYPE_ALIGN (type);

            if (size > 0 && !can_move_by_pieces (size, align))
              {
                /* Drop the initializer into a static read-only
                   temporary so it can be block-copied.  */
                tree new = create_tmp_var_raw (type, "C");

                gimple_add_tmp_var (new);
                TREE_STATIC (new) = 1;
                TREE_READONLY (new) = 1;
                DECL_INITIAL (new) = ctor;
                if (align > DECL_ALIGN (new))
                  {
                    DECL_ALIGN (new) = align;
                    DECL_USER_ALIGN (new) = 1;
                  }
                walk_tree (&DECL_INITIAL (new), force_labels_r, NULL, NULL);

                TREE_OPERAND (*expr_p, 1) = new;

                /* This is no longer an assignment of a CONSTRUCTOR, but
                   we still may have processing to do on the LHS.  So
                   pretend we didn't do anything here to let that
                   happen.  */
                return GS_UNHANDLED;
              }
          }

        /* If there are nonzero elements, pre-evaluate to capture
           elements overlapping with the lhs into temporaries.  We must
           do this before clearing to fetch the values before they are
           zeroed-out.  */
        if (num_nonzero_elements > 0)
          {
            preeval_data.lhs_base_decl = get_base_address (object);
            if (!DECL_P (preeval_data.lhs_base_decl))
              preeval_data.lhs_base_decl = NULL;
            preeval_data.lhs_alias_set = get_alias_set (object);

            gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
                                        pre_p, post_p, &preeval_data);
          }

        if (cleared)
          {
            /* Zap the CONSTRUCTOR element list, which simplifies this
               case.  Note that we still have to gimplify, in order to
               handle the case of variable sized types.  Avoid shared
               tree structures.  */
            CONSTRUCTOR_ELTS (ctor) = NULL;
            object = unshare_expr (object);
            gimplify_stmt (expr_p);
            append_to_statement_list (*expr_p, pre_p);
          }

        /* If we have not block cleared the object, or if there are
           nonzero elements in the constructor, add assignments to the
           individual scalar fields of the object.  */
        if (!cleared || num_nonzero_elements > 0)
          gimplify_init_ctor_eval (object, elts, pre_p, cleared);

        *expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
        tree r, i;

        /* Extract the real and imaginary parts out of the ctor.  */
        gcc_assert (VEC_length (constructor_elt, elts) == 2);
        r = VEC_index (constructor_elt, elts, 0)->value;
        i = VEC_index (constructor_elt, elts, 1)->value;
        if (r == NULL || i == NULL)
          {
            tree zero = fold_convert (TREE_TYPE (type), integer_zero_node);
            if (r == NULL)
              r = zero;
            if (i == NULL)
              i = zero;
          }

        /* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
           represent creation of a complex value.  */
        if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
          {
            ctor = build_complex (type, r, i);
            TREE_OPERAND (*expr_p, 1) = ctor;
          }
        else
          {
            ctor = build2 (COMPLEX_EXPR, type, r, i);
            TREE_OPERAND (*expr_p, 1) = ctor;
            ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
                                 pre_p, post_p,
                                 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
                                 fb_rvalue);
          }
      }
      break;

    case VECTOR_TYPE:
      {
        unsigned HOST_WIDE_INT ix;
        constructor_elt *ce;

        /* Go ahead and simplify constant constructors to
           VECTOR_CST.  */
        if (TREE_CONSTANT (ctor))
          {
            bool constant_p = true;
            tree value;

            /* Even when ctor is constant, it might contain non-*_CST
               elements (e.g. { 1.0/0.0 - 1.0/0.0, 0.0 }) and those
               don't belong into VECTOR_CST nodes.  */
            FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
              if (!CONSTANT_CLASS_P (value))
                {
                  constant_p = false;
                  break;
                }

            if (constant_p)
              {
                TREE_OPERAND (*expr_p, 1)
                  = build_vector_from_ctor (type, elts);
                break;
              }

            /* Don't reduce a TREE_CONSTANT vector ctor even if we
               can't make a VECTOR_CST.  It won't do anything for us,
               and it'll prevent us from representing it as a single
               constant.  */
            break;
          }

        /* Vector types use CONSTRUCTOR all the way through gimple
           compilation as a general initializer.  */
        for (ix = 0; VEC_iterate (constructor_elt, elts, ix, ce); ix++)
          {
            enum gimplify_status tret;
            tret = gimplify_expr (&ce->value, pre_p, post_p,
                                  is_gimple_val, fb_rvalue);
            if (tret == GS_ERROR)
              ret = GS_ERROR;
          }
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = object;
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}

/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */

static tree
fold_indirect_ref_rhs (tree t)
{
  tree type = TREE_TYPE (TREE_TYPE (t));
  tree sub = t;
  tree subtype;

  STRIP_USELESS_TYPE_CONVERSION (sub);
  subtype = TREE_TYPE (sub);
  if (!POINTER_TYPE_P (subtype))
    return NULL_TREE;

  if (TREE_CODE (sub) == ADDR_EXPR)
    {
      tree op = TREE_OPERAND (sub, 0);
      tree optype = TREE_TYPE (op);
      /* *&p => p */
      if (lang_hooks.types_compatible_p (type, optype))
        return op;
      /* *(foo *)&fooarray => fooarray[0] */
      else if (TREE_CODE (optype) == ARRAY_TYPE
               && lang_hooks.types_compatible_p (type, TREE_TYPE (optype)))
        {
          tree type_domain = TYPE_DOMAIN (optype);
          tree min_val = size_zero_node;
          if (type_domain && TYPE_MIN_VALUE (type_domain))
            min_val = TYPE_MIN_VALUE (type_domain);
          return build4 (ARRAY_REF, type, op, min_val,
                         NULL_TREE, NULL_TREE);
        }
    }

  /* *(foo *)fooarrptr => (*fooarrptr)[0] */
  if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
      && lang_hooks.types_compatible_p (type,
                                        TREE_TYPE (TREE_TYPE (subtype))))
    {
      tree type_domain;
      tree min_val = size_zero_node;
      tree osub = sub;
      sub = fold_indirect_ref_rhs (sub);
      if (! sub)
        sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub);
      type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
      if (type_domain && TYPE_MIN_VALUE (type_domain))
        min_val = TYPE_MIN_VALUE (type_domain);
      return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
    }

  return NULL_TREE;
}

/* Subroutine of gimplify_modify_expr to do simplifications of
   MODIFY_EXPRs based on the code of the RHS.
   We loop for as long as something changes.  */

static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p, tree *pre_p,
			  tree *post_p, bool want_value)
{
  enum gimplify_status ret = GS_OK;

  /* Keep re-examining the RHS until no case below makes further
     progress (GS_UNHANDLED), or a case returns directly.  */
  while (ret != GS_UNHANDLED)
    switch (TREE_CODE (*from_p))
      {
      case INDIRECT_REF:
	{
	  /* If we have code like

	       *(const A*)(A*)&x

	     where the type of "x" is a (possibly cv-qualified variant
	     of "A"), treat the entire expression as identical to "x".
	     This kind of code arises in C++ when an object is bound
	     to a const reference, and if "x" is a TARGET_EXPR we want
	     to take advantage of the optimization below.  */
	  tree t = fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
	  if (t)
	    {
	      *from_p = t;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	  break;
	}

      case TARGET_EXPR:
	{
	  /* If we are initializing something from a TARGET_EXPR, strip the
	     TARGET_EXPR and initialize it directly, if possible.  This can't
	     be done if the initializer is void, since that implies that the
	     temporary is set in some non-trivial way.

	     ??? What about code that pulls out the temp and uses it
	     elsewhere? I think that such code never uses the TARGET_EXPR as
	     an initializer.  If I'm wrong, we'll die because the temp won't
	     have any RTL.  In that case, I guess we'll need to replace
	     references somehow.  */
	  tree init = TARGET_EXPR_INITIAL (*from_p);

	  if (!VOID_TYPE_P (TREE_TYPE (init)))
	    {
	      *from_p = init;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	}
	break;

      case COMPOUND_EXPR:
	/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
	   caught.  */
	gimplify_compound_expr (from_p, pre_p, true);
	ret = GS_OK;
	break;

      case CONSTRUCTOR:
	/* If we're initializing from a CONSTRUCTOR, break this into
	   individual MODIFY_EXPRs.  */
	return gimplify_init_constructor (expr_p, pre_p, post_p, want_value);

      case COND_EXPR:
	/* If we're assigning to a non-register type, push the assignment
	   down into the branches.  This is mandatory for ADDRESSABLE types,
	   since we cannot generate temporaries for such, but it saves a
	   copy in other cases as well.  */
	if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
	  {
	    /* This code should mirror the code in gimplify_cond_expr.  */
	    enum tree_code code = TREE_CODE (*expr_p);
	    tree cond = *from_p;
	    tree result = *to_p;

	    ret = gimplify_expr (&result, pre_p, post_p,
				 is_gimple_min_lval, fb_lvalue);
	    if (ret != GS_ERROR)
	      ret = GS_OK;

	    /* Rewrite each non-void arm of the condition as an assignment
	       of that arm to the destination.  */
	    if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
	      TREE_OPERAND (cond, 1) = build2 (code, void_type_node, result,
					       TREE_OPERAND (cond, 1));
	    if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
	      TREE_OPERAND (cond, 2) = build2 (code, void_type_node,
					       unshare_expr (result),
					       TREE_OPERAND (cond, 2));

	    TREE_TYPE (cond) = void_type_node;
	    recalculate_side_effects (cond);

	    if (want_value)
	      {
		gimplify_and_add (cond, pre_p);
		*expr_p = unshare_expr (result);
	      }
	    else
	      *expr_p = cond;

	    return ret;
	  }
	else
	  ret = GS_UNHANDLED;
	break;

      case CALL_EXPR:
	/* For calls that return in memory, give *to_p as the CALL_EXPR's
	   return slot so that we don't generate a temporary.  */
	if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
	    && aggregate_value_p (*from_p, *from_p))
	  {
	    bool use_target;

	    if (!(rhs_predicate_for (*to_p))(*from_p))
	      /* If we need a temporary, *to_p isn't accurate.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == RESULT_DECL
		     && DECL_NAME (*to_p) == NULL_TREE
		     && needs_to_live_in_memory (*to_p))
	      /* It's OK to use the return slot directly unless it's an NRV. */
	      use_target = true;
	    else if (is_gimple_reg_type (TREE_TYPE (*to_p))
		     || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
	      /* Don't force regs into memory.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == VAR_DECL
		     && DECL_GIMPLE_FORMAL_TEMP_P (*to_p))
	      /* Don't use the original target if it's a formal temp; we
		 don't want to take their addresses.  */
	      use_target = false;
	    else if (TREE_CODE (*expr_p) == INIT_EXPR)
	      /* It's OK to use the target directly if it's being
		 initialized.  */
	      use_target = true;
	    else if (!is_gimple_non_addressable (*to_p))
	      /* Don't use the original target if it's already addressable;
		 if its address escapes, and the called function uses the
		 NRV optimization, a conforming program could see *to_p
		 change before the called function returns; see c++/19317.
		 When optimizing, the return_slot pass marks more functions
		 as safe after we have escape info.  */
	      use_target = false;
	    else
	      use_target = true;

	    if (use_target)
	      {
		CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
		lang_hooks.mark_addressable (*to_p);
	      }
	  }

	ret = GS_UNHANDLED;
	break;

	/* If we're initializing from a container, push the initialization
	   inside it.  */
      case CLEANUP_POINT_EXPR:
      case BIND_EXPR:
      case STATEMENT_LIST:
	{
	  tree wrap = *from_p;
	  tree t;

	  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
			       fb_lvalue);
	  if (ret != GS_ERROR)
	    ret = GS_OK;

	  t = voidify_wrapper_expr (wrap, *expr_p);
	  gcc_assert (t == *expr_p);

	  if (want_value)
	    {
	      gimplify_and_add (wrap, pre_p);
	      *expr_p = unshare_expr (*to_p);
	    }
	  else
	    *expr_p = wrap;
	  return GS_OK;
	}

      default:
	ret = GS_UNHANDLED;
	break;
      }

  return ret;
}

/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_COMPLEX_GIMPLE_REG_P set.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, tree *pre_p, bool want_value)
{
  enum tree_code code, ocode;
  tree lhs, rhs, new_rhs, other, realpart, imagpart;

  lhs = TREE_OPERAND (*expr_p, 0);
  rhs = TREE_OPERAND (*expr_p, 1);
  code = TREE_CODE (lhs);
  lhs = TREE_OPERAND (lhs, 0);

  /* Read the part of the complex variable NOT being stored, so the total
     store can combine the old half with the new one.  */
  ocode = code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR;
  other = build1 (ocode, TREE_TYPE (rhs), lhs);
  other = get_formal_tmp_var (other, pre_p);

  realpart = code == REALPART_EXPR ? rhs : other;
  imagpart = code == REALPART_EXPR ? other : rhs;

  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  TREE_OPERAND (*expr_p, 0) = lhs;
  TREE_OPERAND (*expr_p, 1) = new_rhs;

  if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = rhs;
    }

  return GS_ALL_DONE;
}

/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
	      : varname '=' rhs
	      | '*' ID '=' rhs

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.

    WANT_VALUE is nonzero iff we want to use the value of this expression
	in another expression.  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  */
  if (zero_sized_type (TREE_TYPE (*from_p)))
    {
      gimplify_stmt (from_p);
      gimplify_stmt (to_p);
      append_to_statement_list (*from_p, pre_p);
      append_to_statement_list (*to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.  Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must here.  */
  maybe_with_size_expr (from_p);

  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  ret = gimplify_expr (from_p, pre_p, post_p,
		       rhs_predicate_for (*to_p), fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value);
      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* If we've somehow already got an SSA_NAME on the LHS, then
	 we're probably modified it twice.  Not good.  */
      gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
      *to_p = make_ssa_name (*to_p, *expr_p);
    }

  if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = *to_p;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify a comparison between two variable-sized objects.  Do this
   with a call to BUILT_IN_MEMCMP.
*/ static enum gimplify_status gimplify_variable_sized_compare (tree *expr_p) { tree op0 = TREE_OPERAND (*expr_p, 0); tree op1 = TREE_OPERAND (*expr_p, 1); tree args, t, dest; t = TYPE_SIZE_UNIT (TREE_TYPE (op0)); t = unshare_expr (t); t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, op0); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (op1); args = tree_cons (NULL, t, args); dest = build_fold_addr_expr (op0); args = tree_cons (NULL, dest, args); t = implicit_built_in_decls[BUILT_IN_MEMCMP]; t = build_function_call_expr (t, args); *expr_p = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node); return GS_OK; } /* Gimplify a comparison between two aggregate objects of integral scalar mode as a comparison between the bitwise equivalent scalar values. */ static enum gimplify_status gimplify_scalar_mode_aggregate_compare (tree *expr_p) { tree op0 = TREE_OPERAND (*expr_p, 0); tree op1 = TREE_OPERAND (*expr_p, 1); tree type = TREE_TYPE (op0); tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1); op0 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op0); op1 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op1); *expr_p = fold_build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1); return GS_OK; } /* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions. EXPR_P points to the expression to gimplify. Expressions of the form 'a && b' are gimplified to: a && b ? true : false gimplify_cond_expr will do the rest. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_boolean_expr (tree *expr_p) { /* Preserve the original type of the expression. */ tree type = TREE_TYPE (*expr_p); *expr_p = build3 (COND_EXPR, type, *expr_p, fold_convert (type, boolean_true_node), fold_convert (type, boolean_false_node)); return GS_OK; } /* Gimplifies an expression sequence. 
   This function gimplifies each expression and re-writes the original
   expression with the last expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the expressions
   in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */
/* ??? Should rearrange to share the pre-queue with all the indirect
   invocations of gimplify_expr.  Would probably save on creations of
   statement_list nodes.  */

static enum gimplify_status
gimplify_compound_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree t = *expr_p;

  /* Emit every left-hand operand of the chain to PRE_P, descending down
     the right spine until the final (value) expression is reached.  */
  do
    {
      tree *sub_p = &TREE_OPERAND (t, 0);

      if (TREE_CODE (*sub_p) == COMPOUND_EXPR)
	gimplify_compound_expr (sub_p, pre_p, false);
      else
	gimplify_stmt (sub_p);
      append_to_statement_list (*sub_p, pre_p);

      t = TREE_OPERAND (t, 1);
    }
  while (TREE_CODE (t) == COMPOUND_EXPR);

  *expr_p = t;
  if (want_value)
    return GS_OK;
  else
    {
      /* The value is unused, so the tail can be gimplified as a plain
	 statement too.  */
      gimplify_stmt (expr_p);
      return GS_ALL_DONE;
    }
}

/* Gimplifies a statement list.  These may be created either by an
   enlightened front-end, or by shortcut_cond_expr.  */

static enum gimplify_status
gimplify_statement_list (tree *expr_p, tree *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  tree_stmt_iterator i = tsi_start (*expr_p);

  while (!tsi_end_p (i))
    {
      tree t;

      gimplify_stmt (tsi_stmt_ptr (i));

      t = tsi_stmt (i);
      if (t == NULL)
	/* Gimplification reduced the statement to nothing; drop it.  */
	tsi_delink (&i);
      else if (TREE_CODE (t) == STATEMENT_LIST)
	{
	  /* Flatten a nested statement list into this one.  */
	  tsi_link_before (&i, t, TSI_SAME_STMT);
	  tsi_delink (&i);
	}
      else
	tsi_next (&i);
    }

  if (temp)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/*  Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
    gimplify.  After gimplification, EXPR_P will point to a new temporary
    that holds the original value of the SAVE_EXPR node.

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.
 */

static enum gimplify_status
gimplify_save_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  enum gimplify_status ret = GS_ALL_DONE;
  tree val;

  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
  val = TREE_OPERAND (*expr_p, 0);

  /* If the SAVE_EXPR has not been resolved, then evaluate it once.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      /* The operand may be a void-valued expression such as SAVE_EXPRs
	 generated by the Java frontend for class initialization.  It is
	 being executed only for its side-effects.  */
      if (TREE_TYPE (val) == void_type_node)
	{
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_stmt, fb_none);
	  append_to_statement_list (TREE_OPERAND (*expr_p, 0), pre_p);
	  val = NULL;
	}
      else
	/* Evaluate into a temporary so later uses see the saved value.  */
	val = get_initialized_tmp_var (val, pre_p, post_p);

      TREE_OPERAND (*expr_p, 0) = val;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = val;

  return ret;
}

/*  Re-write the ADDR_EXPR node pointed to by EXPR_P

      unary_expr
	      : ...
	      | '&' varname
	      ...

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.  */

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.  */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

	if (!lang_hooks.types_compatible_p (t_expr, t_op00))
	  {
#ifdef ENABLE_CHECKING
	    tree t_op0 = TREE_TYPE (op0);
	    gcc_assert (POINTER_TYPE_P (t_expr)
			&& cpt_same_type (TREE_CODE (t_op0) == ARRAY_TYPE
					  ? TREE_TYPE (t_op0) : t_op0,
					  TREE_TYPE (t_expr))
			&& POINTER_TYPE_P (t_op00)
			&& cpt_same_type (t_op0, TREE_TYPE (t_op00)));
#endif
	    op00 = fold_convert (TREE_TYPE (expr), op00);
	  }
	*expr_p = op00;
	ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.

	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert (TREE_TYPE (expr),
			      build_fold_addr_expr (TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    default:
      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret != GS_ERROR)
	{
	  op0 = TREE_OPERAND (expr, 0);

	  /* For various reasons, the gimplification of the expression
	     may have made a new INDIRECT_REF.  */
	  if (TREE_CODE (op0) == INDIRECT_REF)
	    goto do_indirect_ref;

	  /* Make sure TREE_INVARIANT, TREE_CONSTANT, and TREE_SIDE_EFFECTS
	     is set properly.  */
	  recompute_tree_invariant_for_addr_expr (expr);

	  /* Mark the RHS addressable.  */
	  lang_hooks.mark_addressable (TREE_OPERAND (expr, 0));
	}
      break;
    }

  return ret;
}

/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.
 */

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  int noutputs = list_length (ASM_OUTPUTS (expr));
  const char **oconstraints
    = (const char **) alloca ((noutputs) * sizeof (const char *));
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;

  ret = GS_ALL_DONE;
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      size_t constraint_len;
      oconstraints[i] = constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      parse_output_constraint (&constraint, i, 0, 0,
			       &allows_mem, &allows_reg, &is_inout);

      /* Memory-only outputs must live in memory, so force addressability.  */
      if (!allows_reg && allows_mem)
	lang_hooks.mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in asm output %d", i);
	  ret = tret;
	}

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
	     operands.  */
	  tree input;
	  char buf[10];

	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%d", i);
	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  /* First pass: compute an upper bound for the rewritten
		     constraint's length.  */
		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  /* Second pass: rewrite alternative by alternative,
		     replacing register-capable ones with the operand
		     number and keeping the rest verbatim.  */
		  str = (char *) alloca (len);
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      /* Temporarily prefix this alternative with '=' so
			 parse_output_constraint accepts it.  */
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);
		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    /* No register allowed: keep the constraint minus the '+'.  */
	    input = build_string (constraint_len - 1, constraint + 1);
	  free (p);
	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  /* Note: I keeps counting past the outputs, so it is the overall operand
     number for the inputs below.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  lang_hooks.mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error ("memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}
    }

  return ret;
}

/* Gimplify a CLEANUP_POINT_EXPR.
   Currently this works by adding WITH_CLEANUP_EXPRs to the prequeue as we
   encounter cleanups while gimplifying the body, and converting them to
   TRY_FINALLY_EXPRs when we return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p)
{
  tree_stmt_iterator iter;
  tree body;

  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  tree old_cleanups = gimplify_ctxp->conditional_cleanups;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL_TREE;

  body = TREE_OPERAND (*expr_p, 0);
  gimplify_to_stmt_list (&body);
  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;

  /* Convert each WITH_CLEANUP_EXPR marker into a TRY_FINALLY/TRY_CATCH
     covering everything after it in the statement list.  */
  for (iter = tsi_start (body); !tsi_end_p (iter); )
    {
      tree *wce_p = tsi_stmt_ptr (iter);
      tree wce = *wce_p;

      if (TREE_CODE (wce) == WITH_CLEANUP_EXPR)
	{
	  if (tsi_one_before_end_p (iter))
	    {
	      /* Nothing follows the cleanup marker, so the try/finally
		 would be pointless; just run the cleanup inline.  */
	      tsi_link_before (&iter, TREE_OPERAND (wce, 0), TSI_SAME_STMT);
	      tsi_delink (&iter);
	      break;
	    }
	  else
	    {
	      tree sl, tfe;
	      enum tree_code code;

	      if (CLEANUP_EH_ONLY (wce))
		code = TRY_CATCH_EXPR;
	      else
		code = TRY_FINALLY_EXPR;

	      sl = tsi_split_statement_list_after (&iter);
	      tfe = build2 (code, void_type_node, sl, NULL_TREE);
	      append_to_statement_list (TREE_OPERAND (wce, 0),
					&TREE_OPERAND (tfe, 1));
	      *wce_p = tfe;
	      /* Continue scanning inside the newly protected region for
		 further cleanup markers.  */
	      iter = tsi_start (sl);
	    }
	}
      else
	tsi_next (&iter);
    }

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (body, pre_p);
      return GS_OK;
    }
  else
    {
      *expr_p = body;
      return GS_ALL_DONE;
    }
}

/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  */

static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, tree *pre_p)
{
  tree wce;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the WITH_CLEANUP_EXPR.  */
  if (errorcount || sorrycount)
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context.  So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor.  Thus

	   test ? f(A()) : 0

	 becomes (approximately)

	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */

      tree flag = create_tmp_var (boolean_type_node, "cleanup");
      tree ffalse = build2 (MODIFY_EXPR, void_type_node, flag,
			    boolean_false_node);
      tree ftrue = build2 (MODIFY_EXPR, void_type_node, flag,
			   boolean_true_node);
      cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      append_to_statement_list (ffalse, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (wce, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (ftrue, pre_p);

      /* Because of this manipulation, and the EH edges that jump
	 threading cannot redirect, the temporary (VAR) will appear
	 to be used uninitialized.  Don't warn.  */
      TREE_NO_WARNING (var) = 1;
    }
  else
    {
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      CLEANUP_EH_ONLY (wce) = eh_only;
      append_to_statement_list (wce, pre_p);
    }

  gimplify_stmt (&TREE_OPERAND (wce, 0));
}

/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.  */

static enum gimplify_status
gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;

  if (init)
    {
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list.  */
      gimple_add_tmp_var (temp);

      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  init = build2 (INIT_EXPR, void_type_node, temp, init);
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt,
			       fb_none);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once.  */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      append_to_statement_list (init, pre_p);

      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  gimplify_stmt (&TARGET_EXPR_CLEANUP (targ));
	  gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
			       CLEANUP_EH_ONLY (targ), pre_p);
#ifdef KEY /* bug 10962 */
	  /* Open64/gspin patch: record that this TARGET_EXPR's cleanup
	     must be emitted, both on the tree and in the gspin flags.  */
	  if (flag_spin_file)
	    {
	      EMIT_TARGET_EXPR_CLEANUP(targ) = 1;
	      if (gspin_invoked (targ))
		gs_set_flag_value (targ, GS_EMIT_TARGET_EXPR_CLEANUP, 1);
	    }
#endif
	}

      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));

  *expr_p = temp;
  return GS_OK;
}

/* Gimplification of expression trees.  */

/* Gimplify an expression which appears at statement context; usually, this
   means replacing it with a suitably gimple STATEMENT_LIST.  */

void
gimplify_stmt (tree *stmt_p)
{
  gimplify_expr (stmt_p, NULL, NULL, is_gimple_stmt, fb_none);
}

/* Similarly, but force the result to be a STATEMENT_LIST.  */

void
gimplify_to_stmt_list (tree *stmt_p)
{
  gimplify_stmt (stmt_p);
  if (!*stmt_p)
    *stmt_p = alloc_stmt_list ();
  else if (TREE_CODE (*stmt_p) != STATEMENT_LIST)
    {
      tree t = *stmt_p;
      *stmt_p = alloc_stmt_list ();
      append_to_statement_list (t, stmt_p);
    }
}

/* Add FIRSTPRIVATE entries for DECL in the OpenMP contexts of the
   surrounding parallels up from CTX.  If entries already exist, force them
   to be some flavor of private.  If there is no enclosing parallel, do
   nothing.
*/ void omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl) { splay_tree_node n; if (decl == NULL || !DECL_P (decl)) return; do { n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl); if (n != NULL) { if (n->value & GOVD_SHARED) n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN); else return; } else if (ctx->region_type != ORT_WORKSHARE) omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE); ctx = ctx->outer_context; } while (ctx); } /* Similarly for each of the type sizes of TYPE. */ static void omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type) { if (type == NULL || type == error_mark_node) return; type = TYPE_MAIN_VARIANT (type); if (pointer_set_insert (ctx->privatized_types, type)) return; switch (TREE_CODE (type)) { case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case REAL_TYPE: omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type)); omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type)); break; case ARRAY_TYPE: omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type)); omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type)); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { tree field; for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) { omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field)); omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field)); } } break; case POINTER_TYPE: case REFERENCE_TYPE: omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type)); break; default: break; } omp_firstprivatize_variable (ctx, TYPE_SIZE (type)); omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type)); lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type); } /* Add an entry for DECL in the OpenMP context CTX with FLAGS. 
 */

static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return;

  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (decl))
      || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
    flags |= GOVD_SEEN;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  */
      nflags = n->value | flags;
      gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
		  == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE));
      n->value = nflags;
      return;
    }

  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL))
	{
	  nflags = flags & GOVD_PRIVATE ? GOVD_PRIVATE : GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}

      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));

      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & GOVD_LOCAL))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if (lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      gcc_assert ((flags & GOVD_LOCAL) == 0);
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (TREE_CODE (t) != INTEGER_CST)
	    omp_notice_variable (ctx, t, true);
	}
    }

  splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}

/* Record the fact that DECL was used within the OpenMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.  Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any).  */

static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return false;

  /* Threadprivate variables are predetermined.  */
  if (is_global_var (decl))
    {
      if (DECL_THREAD_LOCAL_P (decl))
	return false;

      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree value = get_base_address (DECL_VALUE_EXPR (decl));

	  if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
	    return false;
	}
    }

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      /* First use in this context: determine the data-sharing class
	 from the effective default clause.  */
      enum omp_clause_default_kind default_kind, kind;
      struct gimplify_omp_ctx *octx;

      if (ctx->region_type == ORT_WORKSHARE)
	goto do_outer;

      /* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
	 remapped firstprivate instead of shared.  To some extent this is
	 addressed in omp_firstprivatize_type_sizes, but not
	 effectively.  */
      default_kind = ctx->default_kind;
      kind = lang_hooks.decls.omp_predetermined_sharing (decl);
      if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
	default_kind = kind;

      switch (default_kind)
	{
	case OMP_CLAUSE_DEFAULT_NONE:
	  error ("%qs not specified in enclosing parallel",
		 IDENTIFIER_POINTER (DECL_NAME (decl)));
	  error ("%Henclosing parallel", &ctx->location);
	  /* FALLTHRU */
	case OMP_CLAUSE_DEFAULT_SHARED:
	  flags |= GOVD_SHARED;
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  flags |= GOVD_PRIVATE;
	  break;
	case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
	  flags |= GOVD_FIRSTPRIVATE;
	  break;
	case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
	  /* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED.  */
	  gcc_assert (ctx->region_type == ORT_TASK);
	  if (ctx->outer_context)
	    omp_notice_variable (ctx->outer_context, decl, in_code);
	  /* Scan outward to the innermost parallel: if any context on the
	     way privatizes DECL, the task must capture it firstprivate.  */
	  for (octx = ctx->outer_context; octx; octx = octx->outer_context)
	    {
	      splay_tree_node n2;

	      n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
	      if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
		{
		  flags |= GOVD_FIRSTPRIVATE;
		  break;
		}
	      if ((octx->region_type & ORT_PARALLEL) != 0)
		break;
	    }
	  if (flags & GOVD_FIRSTPRIVATE)
	    break;
	  if (octx == NULL
	      && (TREE_CODE (decl) == PARM_DECL
		  || (!is_global_var (decl)
		      && DECL_CONTEXT (decl) == current_function_decl)))
	    {
	      flags |= GOVD_FIRSTPRIVATE;
	      break;
	    }
	  flags |= GOVD_SHARED;
	  break;
	default:
	  gcc_unreachable ();
	}

      if ((flags & GOVD_PRIVATE)
	  && lang_hooks.decls.omp_private_outer_ref (decl))
	flags |= GOVD_PRIVATE_OUTER_REF;

      omp_add_variable (ctx, decl, flags);

      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }

  shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);

  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;

 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}

/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.
*/

static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      if (n->value & GOVD_SHARED)
	{
	  /* Only error (and repair to PRIVATE) in the innermost context;
	     an outer SHARED just means "not private" here.  */
	  if (ctx == gimplify_omp_ctxp)
	    {
	      error ("iteration variable %qs should be private",
		     IDENTIFIER_POINTER (DECL_NAME (decl)));
	      n->value = GOVD_PRIVATE;
	      return true;
	    }
	  else
	    return false;
	}
      else if ((n->value & GOVD_EXPLICIT) != 0
	       && (ctx == gimplify_omp_ctxp
		   || (ctx->region_type == ORT_COMBINED_PARALLEL
		       && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  /* An explicit firstprivate/reduction clause on the iteration
	     variable of the relevant construct is invalid.  */
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qs should not be firstprivate",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qs should not be reduction",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	}
      return true;
    }

  /* Unknown in this context: recurse through enclosing worksharing
     regions; at the outermost level, locals count as private.  */
  if (ctx->region_type != ORT_WORKSHARE)
    return false;
  else if (ctx->outer_context)
    return omp_is_private (ctx->outer_context, decl);
  else
    return !is_global_var (decl);
}

/* Return true if DECL is private within a parallel region that binds
   to the current construct's context or in parallel region's REDUCTION
   clause.  */

static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  /* Walk outward through worksharing contexts until we leave them or
     find an entry for DECL.  */
  do
    {
      ctx = ctx->outer_context;
      if (ctx == NULL)
	return !(is_global_var (decl)
		 /* References might be private, but might be shared too.  */
		 || lang_hooks.decls.omp_privatize_by_reference (decl));

      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	return (n->value & GOVD_SHARED) == 0;
    }
  while (ctx->region_type == ORT_WORKSHARE);
  return false;
}

/* Scan the OpenMP clauses in *LIST_P, installing mappings into a new
   and previous omp contexts.
*/

/* Scan the clause list *LIST_P for the OpenMP region of kind
   REGION_TYPE, pushing a new gimplify_omp_ctx and recording each
   clause's data-sharing in it.  Clause operands that need
   gimplification are gimplified into *PRE_P.  Erroneous or invalid
   clauses are unlinked from the list.  On return gimplify_omp_ctxp is
   the newly created context.  */

static void
gimplify_scan_omp_clauses (tree *list_p, tree *pre_p,
			   enum omp_region_type region_type)
{
  struct gimplify_omp_ctx *ctx, *outer_ctx;
  tree c;

  ctx = new_omp_context (region_type);
  outer_ctx = ctx->outer_context;

  while ((c = *list_p) != NULL)
    {
      enum gimplify_status gs;
      bool remove = false;
      bool notice_outer = true;
      const char *check_non_private = NULL;
      unsigned int flags;
      tree decl;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  flags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
	    {
	      flags |= GOVD_PRIVATE_OUTER_REF;
	      OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
	    }
	  else
	    notice_outer = false;
	  goto do_add;
	case OMP_CLAUSE_SHARED:
	  flags = GOVD_SHARED | GOVD_EXPLICIT;
	  goto do_add;
	case OMP_CLAUSE_FIRSTPRIVATE:
	  flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
	  check_non_private = "firstprivate";
	  goto do_add;
	case OMP_CLAUSE_LASTPRIVATE:
	  flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "lastprivate";
	  goto do_add;
	case OMP_CLAUSE_REDUCTION:
	  flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "reduction";
	  goto do_add;

	do_add:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	  omp_add_variable (ctx, decl, flags);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      /* Gimplify the reduction init/merge statements inside the
		 new context, with the placeholder treated as local.  */
	      omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
				GOVD_LOCAL | GOVD_SEEN);
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_INIT (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_INIT (c));
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_MERGE (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_MERGE (c));
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		   && OMP_CLAUSE_LASTPRIVATE_STMT (c))
	    {
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context ();
	      /* Wrap a bare lastprivate statement in a BIND_EXPR so it
		 can be gimplified as a body.  */
	      if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
		{
		  tree bind = build3 (BIND_EXPR, void_type_node, NULL,
				      NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
		  OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
		}
	      gimplify_stmt (&OMP_CLAUSE_LASTPRIVATE_STMT (c));
	      pop_gimplify_context (OMP_CLAUSE_LASTPRIVATE_STMT (c));
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  if (notice_outer)
	    goto do_notice;
	  break;

	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	do_notice:
	  if (outer_ctx)
	    omp_notice_variable (outer_ctx, decl, true);
	  /* firstprivate/lastprivate/reduction on a worksharing construct
	     conflicts with privatization in the binding parallel.  */
	  if (check_non_private
	      && region_type == ORT_WORKSHARE
	      && omp_check_private (ctx, decl))
	    {
	      error ("%s variable %qs is private in outer context",
		     check_non_private,
		     IDENTIFIER_POINTER (DECL_NAME (decl)));
	      remove = true;
	    }
	  break;

	case OMP_CLAUSE_IF:
	  OMP_CLAUSE_OPERAND (c, 0)
	    = gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
	  /* Fall through.  */

	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NUM_THREADS:
	  gs = gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
			      is_gimple_val, fb_rvalue);
	  if (gs == GS_ERROR)
	    remove = true;
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  gimplify_omp_ctxp = ctx;
}

/* For all variables that were not actually used within the context,
   remove PRIVATE, SHARED, and FIRSTPRIVATE clauses.
*/

/* Callback for splay_tree_foreach: given a (DECL, GOVD_* flags) entry N,
   synthesize the implicit data-sharing clause for DECL and prepend it to
   the clause list passed in DATA.  Entries that were explicit, local, or
   never seen produce nothing.  */

static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  tree *list_p = (tree *) data;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;

  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
      private_debug = true;
    }
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_SHARED)
    {
      /* Globals are shared implicitly; no clause needed.  */
      if (is_global_var (decl))
	return 0;
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    code = OMP_CLAUSE_FIRSTPRIVATE;
  else
    gcc_unreachable ();

  clause = build_omp_clause (code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = *list_p;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
    OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
  *list_p = clause;
  lang_hooks.decls.omp_finish_clause (clause);

  return 0;
}

/* Post-process the clause list *LIST_P for the current (innermost)
   OpenMP context: drop explicit private/shared/firstprivate clauses
   whose decl was never seen, demote debug-private decls, update
   lastprivate's firstprivate flag, then append all implicit clauses
   and pop/delete the context.  */

static void
gimplify_adjust_omp_clauses (tree *list_p)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;

  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_PRIVATE));
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	    }
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause.  */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  break;

	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  /* Add in any implicit data sharing.  */
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);

  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}

/* Gimplify the contents of an OMP_PARALLEL statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static enum gimplify_status
gimplify_omp_parallel (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;

  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p,
			     OMP_PARALLEL_COMBINED (expr)
			     ? ORT_COMBINED_PARALLEL : ORT_PARALLEL);

  push_gimplify_context ();

  gimplify_stmt (&OMP_PARALLEL_BODY (expr));

  if (TREE_CODE (OMP_PARALLEL_BODY (expr)) == BIND_EXPR)
    pop_gimplify_context (OMP_PARALLEL_BODY (expr));
  else
    pop_gimplify_context (NULL_TREE);

  gimplify_adjust_omp_clauses (&OMP_PARALLEL_CLAUSES (expr));

  return GS_ALL_DONE;
}

/* Gimplify the contents of an OMP_TASK statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static enum gimplify_status
gimplify_omp_task (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;

  gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p, ORT_TASK);

  push_gimplify_context ();

  gimplify_stmt (&OMP_TASK_BODY (expr));

  if (TREE_CODE (OMP_TASK_BODY (expr)) == BIND_EXPR)
    pop_gimplify_context (OMP_TASK_BODY (expr));
  else
    pop_gimplify_context (NULL_TREE);

  gimplify_adjust_omp_clauses (&OMP_TASK_CLAUSES (expr));

  return GS_ALL_DONE;
}

/* Gimplify the gross structure of an OMP_FOR statement.  */

static enum gimplify_status
gimplify_omp_for (tree *expr_p, tree *pre_p)
{
  tree for_stmt, decl, t;
  enum gimplify_status ret = GS_OK;
  int i;

  for_stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
			     ORT_WORKSHARE);

  /* If OMP_FOR is re-gimplified, ensure all variables in pre-body
     are noticed.  */
  gimplify_stmt (&OMP_FOR_PRE_BODY (for_stmt));

  /* Collapsed loops carry parallel INIT/COND/INCR vectors.  */
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
    {
      t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
      gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
      decl = TREE_OPERAND (t, 0);
      gcc_assert (DECL_P (decl));
      gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
		  || POINTER_TYPE_P (TREE_TYPE (decl)));

      /* Make sure the iteration variable is private.  */
      if (omp_is_private (gimplify_omp_ctxp, decl))
	omp_notice_variable (gimplify_omp_ctxp, decl, true);
      else
	omp_add_variable (gimplify_omp_ctxp, decl,
			  GOVD_PRIVATE | GOVD_SEEN);

      /* If DECL is not a gimple register, create a temporary variable to
	 act as an iteration counter.  Gimplify the initial value into the
	 pre-body.  */
      ret |= gimplify_expr (&TREE_OPERAND (t, 1),
			    &OMP_FOR_PRE_BODY (for_stmt),
			    NULL, is_gimple_val, fb_rvalue);

      t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
      gcc_assert (COMPARISON_CLASS_P (t));
      gcc_assert (TREE_OPERAND (t, 0) == decl);

      ret |= gimplify_expr (&TREE_OPERAND (t, 1),
			    &OMP_FOR_PRE_BODY (for_stmt),
			    NULL, is_gimple_val, fb_rvalue);

      /* Canonicalize the increment into DECL = DECL + STEP form.  */
      t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
      switch (TREE_CODE (t))
	{
	case PREINCREMENT_EXPR:
	case POSTINCREMENT_EXPR:
	  t = build_int_cst (TREE_TYPE (decl), 1);
	  goto build_modify;
	case PREDECREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	  t = build_int_cst (TREE_TYPE (decl), -1);
	  goto build_modify;
	build_modify:
	  t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
	  t = build2 (MODIFY_EXPR, void_type_node, decl, t);
	  TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
	  break;

	case MODIFY_EXPR:
	  gcc_assert (TREE_OPERAND (t, 0) == decl);
	  t = TREE_OPERAND (t, 1);
	  switch (TREE_CODE (t))
	    {
	    case PLUS_EXPR:
	      /* Normalize STEP + DECL into DECL + STEP; if the swap
		 applies we're done, otherwise fall through to the
		 DECL-first assertion below (deliberate fallthrough).  */
	      if (TREE_OPERAND (t, 1) == decl)
		{
		  TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
		  TREE_OPERAND (t, 0) = decl;
		  break;
		}
	    case MINUS_EXPR:
	    case POINTER_PLUS_EXPR:
	      gcc_assert (TREE_OPERAND (t, 0) == decl);
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  ret |= gimplify_expr (&TREE_OPERAND (t, 1),
				&OMP_FOR_PRE_BODY (for_stmt),
				NULL, is_gimple_val, fb_rvalue);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  gimplify_to_stmt_list (&OMP_FOR_BODY (for_stmt));
  gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));

  return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
}

/* Gimplify the gross structure of other OpenMP worksharing constructs.
   In particular, OMP_SECTIONS and OMP_SINGLE.  */

static enum gimplify_status
gimplify_omp_workshare (tree *expr_p, tree *pre_p)
{
  tree stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_CLAUSES (stmt), pre_p, ORT_WORKSHARE);
  gimplify_to_stmt_list (&OMP_BODY (stmt));
  gimplify_adjust_omp_clauses (&OMP_CLAUSES (stmt));

  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */

static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  while ((TREE_CODE (expr) == NOP_EXPR
	  || TREE_CODE (expr) == CONVERT_EXPR
	  || TREE_CODE (expr) == NON_LVALUE_EXPR)
	 && TREE_OPERAND (expr, 0) != error_mark_node
	 && (TYPE_MAIN_VARIANT (TREE_TYPE (expr))
	     == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))))
    expr = TREE_OPERAND (expr, 0);

  if (TREE_CODE (expr) == INDIRECT_REF && TREE_OPERAND (expr, 0) == addr)
    return true;
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}

/* A subroutine of gimplify_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns GS_UNHANDLED if the expression is not of the proper form.
*/

static enum gimplify_status
gimplify_omp_atomic_fetch_op (tree *expr_p, tree addr, tree rhs, int index)
{
  enum built_in_function base;
  tree decl, args, itype;
  enum insn_code *optab;

  /* Check for one of the supported fetch-op operations.  */
  switch (TREE_CODE (rhs))
    {
    case PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      /* NOTE(review): subtraction probes sync_add_optab here, presumably
	 because fetch-and-sub is expanded via fetch-and-add — confirm
	 against the expander before changing.  */
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return GS_UNHANDLED;
    }

  /* Make sure the expression is of the proper form.  The lhs must appear
     as one operand of RHS; for the second operand only if the operation
     commutes.  */
  if (goa_lhs_expr_p (TREE_OPERAND (rhs, 0), addr))
    rhs = TREE_OPERAND (rhs, 1);
  else if (commutative_tree_code (TREE_CODE (rhs))
	   && goa_lhs_expr_p (TREE_OPERAND (rhs, 1), addr))
    rhs = TREE_OPERAND (rhs, 0);
  else
    return GS_UNHANDLED;

  decl = built_in_decls[base + index + 1];
  itype = TREE_TYPE (TREE_TYPE (decl));

  /* Bail out if the target has no instruction for this mode.  */
  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return GS_UNHANDLED;

  args = tree_cons (NULL, fold_convert (itype, rhs), NULL);
  args = tree_cons (NULL, addr, args);
  *expr_p = build_function_call_expr (decl, args);
  return GS_OK;
}

/* A subroutine of gimplify_omp_atomic_pipeline.  Walk *EXPR_P and replace
   appearances of *LHS_ADDR with LHS_VAR.  If an expression does not involve
   the lhs, evaluate it into a temporary.  Return 1 if the lhs appeared as
   a subexpression, 0 if it did not, or -1 if an error was encountered.
*/

static int
goa_stabilize_expr (tree *expr_p, tree *pre_p, tree lhs_addr, tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
				     lhs_addr, lhs_var);
      /* Deliberate fallthrough: binary also recurses on operand 0.  */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
				     lhs_addr, lhs_var);
      break;
    default:
      break;
    }

  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }

  return saw_lhs;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	oldval = *addr;
      repeat:
	newval = rhs;	// with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static enum gimplify_status
gimplify_omp_atomic_pipeline (tree *expr_p, tree *pre_p, tree addr,
			      tree rhs, int index)
{
  tree oldval, oldival, oldival2, newval, newival, label;
  tree type, itype, cmpxchg, args, x, iaddr;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return GS_UNHANDLED;

  oldval = create_tmp_var (type, NULL);
  newval = create_tmp_var (type, NULL);

  /* Precompute as much of RHS as possible.  In the same walk, replace
     occurrences of the lhs value with our temporary.  */
  if (goa_stabilize_expr (&rhs, pre_p, addr, oldval) < 0)
    return GS_ERROR;

  x = build_fold_indirect_ref (addr);
  x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
  gimplify_and_add (x, pre_p);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
    {
      oldival = oldval;
      newival = newval;
      iaddr = addr;
    }
  else
    {
      oldival = create_tmp_var (itype, NULL);
      newival = create_tmp_var (itype, NULL);

      x = build1 (VIEW_CONVERT_EXPR, itype, oldval);
      x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
      gimplify_and_add (x, pre_p);
      iaddr = fold_convert (build_pointer_type (itype), addr);
    }

  oldival2 = create_tmp_var (itype, NULL);

  label = create_artificial_label ();
  x = build1 (LABEL_EXPR, void_type_node, label);
  gimplify_and_add (x, pre_p);

  x = build2 (MODIFY_EXPR, void_type_node, newval, rhs);
  gimplify_and_add (x, pre_p);

  if (newval != newival)
    {
      x = build1 (VIEW_CONVERT_EXPR, itype, newval);
      x = build2 (MODIFY_EXPR, void_type_node, newival, x);
      gimplify_and_add (x, pre_p);
    }

  /* Save the old integral value before the CAS so we can compare with
     the CAS result afterwards.  */
  x = build2 (MODIFY_EXPR, void_type_node, oldival2,
	      fold_convert (itype, oldival));
  gimplify_and_add (x, pre_p);

  args = tree_cons (NULL, fold_convert (itype, newival), NULL);
  args = tree_cons (NULL, fold_convert (itype, oldival), args);
  args = tree_cons (NULL, iaddr, args);
  x = build_function_call_expr (cmpxchg, args);
  if (oldval == oldival)
    x = fold_convert (type, x);
  x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
  gimplify_and_add (x, pre_p);

  /* For floating point, be prepared for the loop backedge.  */
  if (oldval != oldival)
    {
      x = build1 (VIEW_CONVERT_EXPR, type, oldival);
      x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
      gimplify_and_add (x, pre_p);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  x = build3 (COND_EXPR, void_type_node,
	      build2 (NE_EXPR, boolean_type_node, oldival, oldival2),
	      build1 (GOTO_EXPR, void_type_node, label), NULL);
  gimplify_and_add (x, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	GOMP_atomic_start ();
	*addr = rhs;
	GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.  */

static enum gimplify_status
gimplify_omp_atomic_mutex (tree *expr_p, tree *pre_p, tree addr, tree rhs)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  t = build_fold_indirect_ref (addr);
  t = build2 (MODIFY_EXPR, void_type_node, t, rhs);
  gimplify_and_add (t, pre_p);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* Gimplify an OMP_ATOMIC statement.  Tries, in order: a __sync fetch-op
   builtin, a compare-and-swap loop, and finally the GOMP mutex
   fallback, which always succeeds.  */

static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, tree *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      enum gimplify_status gs;
      unsigned int align;

      if (DECL_P (TREE_OPERAND (addr, 0)))
	align = DECL_ALIGN_UNIT (TREE_OPERAND (addr, 0));
      else if (TREE_CODE (TREE_OPERAND (addr, 0)) == COMPONENT_REF
	       && TREE_CODE (TREE_OPERAND (TREE_OPERAND (addr, 0), 1))
		  == FIELD_DECL)
	align = DECL_ALIGN_UNIT (TREE_OPERAND (TREE_OPERAND (addr, 0), 1));
      else
	align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
	  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	    {
	      gs = gimplify_omp_atomic_fetch_op (expr_p, addr, rhs, index);
	      if (gs != GS_UNHANDLED)
		return gs;
	    }

	  /* If we don't have specialized __sync builtins, try and
	     implement as a compare and swap loop.  */
	  gs = gimplify_omp_atomic_pipeline (expr_p, pre_p, addr, rhs, index);
	  if (gs != GS_UNHANDLED)
	    return gs;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  return gimplify_omp_atomic_mutex (expr_p, pre_p, addr, rhs);
}

/* Gimplifies the expression tree pointed to by EXPR_P.  Return 0 if
   gimplification failed.

   PRE_P points to the list where side effects that must happen before
   EXPR should be stored.

   POST_P points to the list where side effects that must happen after
   EXPR should be stored, or NULL if there is no suitable list.  In
   that case, we copy the result to a temporary, emit the
   post-effects, and then return the temporary.

   GIMPLE_TEST_F points to a function that takes a tree T and
   returns nonzero if T is in the GIMPLE form requested by the
   caller.  The GIMPLE predicates are in tree-gimple.c.

   This test is used twice.  Before gimplification, the test is
   invoked to determine whether *EXPR_P is already gimple enough.  If
   that fails, *EXPR_P is gimplified according to its code and
   GIMPLE_TEST_F is called again.
If the test still fails, then a new temporary variable is created and assigned the value of the gimplified expression. FALLBACK tells the function what sort of a temporary we want. If the 1 bit is set, an rvalue is OK. If the 2 bit is set, an lvalue is OK. If both are set, either is OK, but an lvalue is preferable. The return value is either GS_ERROR or GS_ALL_DONE, since this function iterates until solution. */ enum gimplify_status gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool (* gimple_test_f) (tree), fallback_t fallback) { tree tmp; tree internal_pre = NULL_TREE; tree internal_post = NULL_TREE; tree save_expr; int is_statement = (pre_p == NULL); location_t saved_location; enum gimplify_status ret; save_expr = *expr_p; if (save_expr == NULL_TREE) return GS_ALL_DONE; /* We used to check the predicate here and return immediately if it succeeds. This is wrong; the design is for gimplification to be idempotent, and for the predicates to only test for valid forms, not whether they are fully simplified. */ /* Set up our internal queues if needed. */ if (pre_p == NULL) pre_p = &internal_pre; if (post_p == NULL) post_p = &internal_post; saved_location = input_location; if (save_expr != error_mark_node && EXPR_HAS_LOCATION (*expr_p)) input_location = EXPR_LOCATION (*expr_p); /* Loop over the specific gimplifiers until the toplevel node remains the same. */ do { /* Strip away as many useless type conversions as possible at the toplevel. */ STRIP_USELESS_TYPE_CONVERSION (*expr_p); /* Remember the expr. */ save_expr = *expr_p; /* Die, die, die, my darling. */ if (save_expr == error_mark_node || (TREE_TYPE (save_expr) && TREE_TYPE (save_expr) == error_mark_node)) { ret = GS_ERROR; break; } /* Do any language-specific gimplification. 
*/ ret = lang_hooks.gimplify_expr (expr_p, pre_p, post_p); if (ret == GS_OK) { if (*expr_p == NULL_TREE) break; if (*expr_p != save_expr) continue; } else if (ret != GS_UNHANDLED) break; ret = GS_OK; switch (TREE_CODE (*expr_p)) { /* First deal with the special cases. */ case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: ret = gimplify_self_mod_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case VIEW_CONVERT_EXPR: ret = gimplify_compound_lval (expr_p, pre_p, post_p, fallback ? fallback : fb_rvalue); break; case COND_EXPR: ret = gimplify_cond_expr (expr_p, pre_p, fallback); /* C99 code may assign to an array in a structure value of a conditional expression, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } break; case CALL_EXPR: ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none); /* C99 code may assign to an array in a structure returned from a function, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } break; case TREE_LIST: gcc_unreachable (); case COMPOUND_EXPR: ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none); break; case MODIFY_EXPR: case INIT_EXPR: ret = gimplify_modify_expr (expr_p, pre_p, post_p, fallback != fb_none); /* The distinction between MODIFY_EXPR and INIT_EXPR is no longer useful. 
*/ if (*expr_p && TREE_CODE (*expr_p) == INIT_EXPR) TREE_SET_CODE (*expr_p, MODIFY_EXPR); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: ret = gimplify_boolean_expr (expr_p); break; case TRUTH_NOT_EXPR: TREE_OPERAND (*expr_p, 0) = gimple_boolify (TREE_OPERAND (*expr_p, 0)); ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case ADDR_EXPR: ret = gimplify_addr_expr (expr_p, pre_p, post_p); break; case VA_ARG_EXPR: ret = gimplify_va_arg_expr (expr_p, pre_p, post_p); break; case CONVERT_EXPR: case NOP_EXPR: if (IS_EMPTY_STMT (*expr_p)) { ret = GS_ALL_DONE; break; } if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); break; } ret = gimplify_conversion (expr_p); if (ret == GS_ERROR) break; if (*expr_p != save_expr) break; /* FALLTHRU */ case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: /* unary_expr: ... | '(' cast ')' val | ... */ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case INDIRECT_REF: *expr_p = fold_indirect_ref (*expr_p); if (*expr_p != save_expr) break; /* else fall through. */ case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_reg, fb_rvalue); recalculate_side_effects (*expr_p); break; /* Constants need not be gimplified. */ case INTEGER_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: ret = GS_ALL_DONE; break; case CONST_DECL: /* If we require an lvalue, such as for ADDR_EXPR, retain the CONST_DECL node. Otherwise the decl is replaceable by its value. */ /* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. 
*/ if (fallback & fb_lvalue) ret = GS_ALL_DONE; else *expr_p = DECL_INITIAL (*expr_p); break; case DECL_EXPR: ret = gimplify_decl_expr (expr_p); break; case EXC_PTR_EXPR: /* FIXME make this a decl. */ ret = GS_ALL_DONE; break; case BIND_EXPR: ret = gimplify_bind_expr (expr_p, pre_p); break; case LOOP_EXPR: ret = gimplify_loop_expr (expr_p, pre_p); break; case SWITCH_EXPR: ret = gimplify_switch_expr (expr_p, pre_p); break; case EXIT_EXPR: ret = gimplify_exit_expr (expr_p); break; case GOTO_EXPR: /* If the target is not LABEL, then it is a computed jump and the target needs to be gimplified. */ if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL) ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p, NULL, is_gimple_val, fb_rvalue); break; case LABEL_EXPR: ret = GS_ALL_DONE; gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) == current_function_decl); break; case CASE_LABEL_EXPR: ret = gimplify_case_label_expr (expr_p); break; case RETURN_EXPR: ret = gimplify_return_expr (*expr_p, pre_p); break; case CONSTRUCTOR: /* Don't reduce this in place; let gimplify_init_constructor work its magic. Buf if we're just elaborating this for side effects, just gimplify any element that has side-effects. */ if (fallback == fb_none) { unsigned HOST_WIDE_INT ix; constructor_elt *ce; tree temp = NULL_TREE; for (ix = 0; VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (*expr_p), ix, ce); ix++) if (TREE_SIDE_EFFECTS (ce->value)) append_to_statement_list (ce->value, &temp); *expr_p = temp; ret = GS_OK; } /* C99 code may assign to an array in a constructed structure or union, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ else if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } else ret = GS_ALL_DONE; break; /* The following are special cases that are not handled by the original GIMPLE grammar. 
*/ /* SAVE_EXPR nodes are converted into a GIMPLE identifier and eliminated. */ case SAVE_EXPR: ret = gimplify_save_expr (expr_p, pre_p, post_p); break; case BIT_FIELD_REF: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_either); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, MIN (r1, r2)); } break; case NON_LVALUE_EXPR: /* This should have been stripped above. */ gcc_unreachable (); case ASM_EXPR: ret = gimplify_asm_expr (expr_p, pre_p, post_p); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 0)); gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 1)); ret = GS_ALL_DONE; break; case CLEANUP_POINT_EXPR: ret = gimplify_cleanup_point_expr (expr_p, pre_p); break; case TARGET_EXPR: ret = gimplify_target_expr (expr_p, pre_p, post_p); break; case CATCH_EXPR: gimplify_to_stmt_list (&CATCH_BODY (*expr_p)); ret = GS_ALL_DONE; break; case EH_FILTER_EXPR: gimplify_to_stmt_list (&EH_FILTER_FAILURE (*expr_p)); ret = GS_ALL_DONE; break; case OBJ_TYPE_REF: { enum gimplify_status r0, r1; r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); } break; case LABEL_DECL: /* We get here when taking the address of a label. We mark the label as "forced"; meaning it can never be removed and it is a potential target for any computed goto. */ FORCED_LABEL (*expr_p) = 1; ret = GS_ALL_DONE; break; case STATEMENT_LIST: ret = gimplify_statement_list (expr_p, pre_p); break; case WITH_SIZE_EXPR: { gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p == &internal_post ? 
NULL : post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); } break; case VAR_DECL: case PARM_DECL: ret = gimplify_var_or_parm_decl (expr_p); break; case RESULT_DECL: /* When within an OpenMP context, notice uses of variables. */ if (gimplify_omp_ctxp) omp_notice_variable (gimplify_omp_ctxp, *expr_p, true); ret = GS_ALL_DONE; break; case SSA_NAME: /* Allow callbacks into the gimplifier during optimization. */ ret = GS_ALL_DONE; break; case OMP_PARALLEL: ret = gimplify_omp_parallel (expr_p, pre_p); break; case OMP_TASK: ret = gimplify_omp_task (expr_p, pre_p); break; case OMP_FOR: ret = gimplify_omp_for (expr_p, pre_p); break; case OMP_SECTIONS: case OMP_SINGLE: ret = gimplify_omp_workshare (expr_p, pre_p); break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: gimplify_to_stmt_list (&OMP_BODY (*expr_p)); break; case OMP_ATOMIC: ret = gimplify_omp_atomic (expr_p, pre_p); break; case OMP_RETURN: case OMP_CONTINUE: ret = GS_ALL_DONE; break; default: switch (TREE_CODE_CLASS (TREE_CODE (*expr_p))) { case tcc_comparison: /* Handle comparison of objects of non scalar mode aggregates with a call to memcmp. It would be nice to only have to do this for variable-sized objects, but then we'd have to allow the same nest of reference nodes we allow for MODIFY_EXPR and that's too complex. Compare scalar mode aggregates as scalar mode values. Using memcmp for them would be very inefficient at best, and is plain wrong if bitfields are involved. */ { tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1)); if (!AGGREGATE_TYPE_P (type)) goto expr_2; else if (TYPE_MODE (type) != BLKmode) ret = gimplify_scalar_mode_aggregate_compare (expr_p); else ret = gimplify_variable_sized_compare (expr_p); break; } /* If *EXPR_P does not need to be special-cased, handle it according to its class. 
*/ case tcc_unary: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); break; case tcc_binary: expr_2: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); break; } case tcc_declaration: case tcc_constant: ret = GS_ALL_DONE; goto dont_recalculate; default: gcc_assert (TREE_CODE (*expr_p) == TRUTH_AND_EXPR || TREE_CODE (*expr_p) == TRUTH_OR_EXPR || TREE_CODE (*expr_p) == TRUTH_XOR_EXPR); goto expr_2; } recalculate_side_effects (*expr_p); dont_recalculate: break; } /* If we replaced *expr_p, gimplify again. */ if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr)) ret = GS_ALL_DONE; } while (ret == GS_OK); /* If we encountered an error_mark somewhere nested inside, either stub out the statement or propagate the error back out. */ if (ret == GS_ERROR) { if (is_statement) *expr_p = NULL; goto out; } /* This was only valid as a return value from the langhook, which we handled. Make sure it doesn't escape from any other context. */ gcc_assert (ret != GS_UNHANDLED); if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p)) { /* We aren't looking for a value, and we don't have a valid statement. If it doesn't have side-effects, throw it away. */ if (!TREE_SIDE_EFFECTS (*expr_p)) *expr_p = NULL; else if (!TREE_THIS_VOLATILE (*expr_p)) { /* This is probably a _REF that contains something nested that has side effects. Recurse through the operands to find it. 
*/ enum tree_code code = TREE_CODE (*expr_p); switch (code) { case COMPONENT_REF: case REALPART_EXPR: case IMAGPART_EXPR: case VIEW_CONVERT_EXPR: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); break; case ARRAY_REF: case ARRAY_RANGE_REF: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, gimple_test_f, fallback); break; default: /* Anything else with side-effects must be converted to a valid statement before we get here. */ gcc_unreachable (); } *expr_p = NULL; } else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p)) && TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode) { /* Historically, the compiler has treated a bare reference to a non-BLKmode volatile lvalue as forcing a load. */ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p)); /* Normally, we do not want to create a temporary for a TREE_ADDRESSABLE type because such a type should not be copied by bitwise-assignment. However, we make an exception here, as all we are doing here is ensuring that we read the bytes that make up the type. We use create_tmp_var_raw because create_tmp_var will abort when given a TREE_ADDRESSABLE type. */ tree tmp = create_tmp_var_raw (type, "vol"); gimple_add_tmp_var (tmp); *expr_p = build2 (MODIFY_EXPR, type, tmp, *expr_p); } else /* We can't do anything useful with a volatile reference to an incomplete type, so just throw it away. Likewise for a BLKmode type, since any implicit inner load should already have been turned into an explicit one by the gimplification process. */ *expr_p = NULL; } /* If we are gimplifying at the statement level, we're done. Tack everything together and replace the original statement with the gimplified form. 
*/ if (fallback == fb_none || is_statement) { if (internal_pre || internal_post) { append_to_statement_list (*expr_p, &internal_pre); append_to_statement_list (internal_post, &internal_pre); annotate_all_with_locus (&internal_pre, input_location); *expr_p = internal_pre; } else if (!*expr_p) ; else if (TREE_CODE (*expr_p) == STATEMENT_LIST) annotate_all_with_locus (expr_p, input_location); else annotate_one_with_locus (*expr_p, input_location); goto out; } /* Otherwise we're gimplifying a subexpression, so the resulting value is interesting. */ /* If it's sufficiently simple already, we're done. Unless we are handling some post-effects internally; if that's the case, we need to copy into a temp before adding the post-effects to the tree. */ if (!internal_post && (*gimple_test_f) (*expr_p)) goto out; /* Otherwise, we need to create a new temporary for the gimplified expression. */ /* We can't return an lvalue if we have an internal postqueue. The object the lvalue refers to would (probably) be modified by the postqueue; we need to copy the value out first, which means an rvalue. */ if ((fallback & fb_lvalue) && !internal_post && is_gimple_addressable (*expr_p)) { /* An lvalue will do. Take the address of the expression, store it in a temporary, and replace the expression with an INDIRECT_REF of that temporary. */ tmp = build_fold_addr_expr (*expr_p); gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue); *expr_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (tmp)), tmp); } else if ((fallback & fb_rvalue) && is_gimple_formal_tmp_rhs (*expr_p)) { gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p))); /* An rvalue will do. Assign the gimplified expression into a new temporary TMP and replace the original expression with TMP. */ if (internal_post || (fallback & fb_lvalue)) /* The postqueue might change the value of the expression between the initialization and use of the temporary, so we can't use a formal temp. FIXME do we care? 
*/ *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); else *expr_p = get_formal_tmp_var (*expr_p, pre_p); if (TREE_CODE (*expr_p) != SSA_NAME) DECL_GIMPLE_FORMAL_TEMP_P (*expr_p) = 1; } else { #ifdef ENABLE_CHECKING if (!(fallback & fb_mayfail)) { fprintf (stderr, "gimplification failed:\n"); print_generic_expr (stderr, *expr_p, 0); debug_tree (*expr_p); internal_error ("gimplification failed"); } #endif gcc_assert (fallback & fb_mayfail); /* If this is an asm statement, and the user asked for the impossible, don't die. Fail and let gimplify_asm_expr issue an error. */ ret = GS_ERROR; goto out; } /* Make sure the temporary matches our predicate. */ gcc_assert ((*gimple_test_f) (*expr_p)); if (internal_post) { annotate_all_with_locus (&internal_post, input_location); append_to_statement_list (internal_post, pre_p); } out: input_location = saved_location; return ret; } /* Look through TYPE for variable-sized objects and gimplify each such size that we find. Add to LIST_P any statements generated. */ void gimplify_type_sizes (tree type, tree *list_p) { tree field, t; if (type == NULL || type == error_mark_node) return; /* We first do the main variant, then copy into any other variants. */ type = TYPE_MAIN_VARIANT (type); /* Avoid infinite recursion. */ if (TYPE_SIZES_GIMPLIFIED (type)) return; TYPE_SIZES_GIMPLIFIED (type) = 1; switch (TREE_CODE (type)) { case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case REAL_TYPE: gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p); gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type); TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type); } break; case ARRAY_TYPE: /* These types may not have declarations, so handle them here. 
*/ gimplify_type_sizes (TREE_TYPE (type), list_p); gimplify_type_sizes (TYPE_DOMAIN (type), list_p); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) { gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p); gimplify_type_sizes (TREE_TYPE (field), list_p); } break; case POINTER_TYPE: case REFERENCE_TYPE: /* We used to recurse on the pointed-to type here, which turned out to be incorrect because its definition might refer to variables not yet initialized at this point if a forward declaration is involved. It was actually useful for anonymous pointed-to types to ensure that the sizes evaluation dominates every possible later use of the values. Restricting to such types here would be safe since there is no possible forward declaration around, but would introduce an undesirable middle-end semantic to anonymity. We then defer to front-ends the responsibility of ensuring that the sizes are evaluated both early and late enough, e.g. by attaching artificial type declarations to the tree. */ break; default: break; } gimplify_one_sizepos (&TYPE_SIZE (type), list_p); gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_SIZE (t) = TYPE_SIZE (type); TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type); TYPE_SIZES_GIMPLIFIED (t) = 1; } } /* A subroutine of gimplify_type_sizes to make sure that *EXPR_P, a size or position, has had all of its SAVE_EXPRs evaluated. We add any required statements to STMT_P. */ void gimplify_one_sizepos (tree *expr_p, tree *stmt_p) { tree type, expr = *expr_p; /* We don't do anything if the value isn't there, is constant, or contains A PLACEHOLDER_EXPR. We also don't want to do anything if it's already a VAR_DECL. 
If it's a VAR_DECL from another function, the gimplifier will want to replace it with a new variable, but that will cause problems if this type is from outside the function. It's OK to have that here. */ if (expr == NULL_TREE || TREE_CONSTANT (expr) || TREE_CODE (expr) == VAR_DECL || CONTAINS_PLACEHOLDER_P (expr)) return; type = TREE_TYPE (expr); *expr_p = unshare_expr (expr); gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue); expr = *expr_p; /* Verify that we've an exact type match with the original expression. In particular, we do not wish to drop a "sizetype" in favour of a type of similar dimensions. We don't want to pollute the generic type-stripping code with this knowledge because it doesn't matter for the bulk of GENERIC/GIMPLE. It only matters that TYPE_SIZE_UNIT and friends retain their "sizetype-ness". */ if (TREE_TYPE (expr) != type && TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)) { tree tmp; *expr_p = create_tmp_var (type, NULL); tmp = build1 (NOP_EXPR, type, expr); tmp = build2 (MODIFY_EXPR, type, *expr_p, tmp); if (EXPR_HAS_LOCATION (expr)) SET_EXPR_LOCUS (tmp, EXPR_LOCUS (expr)); else SET_EXPR_LOCATION (tmp, input_location); gimplify_and_add (tmp, stmt_p); } } #ifdef ENABLE_CHECKING /* Compare types A and B for a "close enough" match. */ static bool cpt_same_type (tree a, tree b) { if (lang_hooks.types_compatible_p (a, b)) return true; /* ??? The C++ FE decomposes METHOD_TYPES to FUNCTION_TYPES and doesn't link them together. This routine is intended to catch type errors that will affect the optimizers, and the optimizers don't add new dereferences of function pointers, so ignore it. */ if ((TREE_CODE (a) == FUNCTION_TYPE || TREE_CODE (a) == METHOD_TYPE) && (TREE_CODE (b) == FUNCTION_TYPE || TREE_CODE (b) == METHOD_TYPE)) return true; /* ??? The C FE pushes type qualifiers after the fact into the type of the element from the type of the array. See build_unary_op's handling of ADDR_EXPR. 
This seems wrong -- if we were going to do this, we should have done it when creating the variable in the first place. Alternately, why aren't the two array types made variants? */ if (TREE_CODE (a) == ARRAY_TYPE && TREE_CODE (b) == ARRAY_TYPE) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); /* And because of those, we have to recurse down through pointers. */ if (POINTER_TYPE_P (a) && POINTER_TYPE_P (b)) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); return false; } /* Check for some cases of the front end missing cast expressions. The type of a dereference should correspond to the pointer type; similarly the type of an address should match its object. */ static tree check_pointer_types_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { tree t = *tp; tree ptype, otype, dtype; switch (TREE_CODE (t)) { case INDIRECT_REF: case ARRAY_REF: otype = TREE_TYPE (t); ptype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); gcc_assert (cpt_same_type (otype, dtype)); break; case ADDR_EXPR: ptype = TREE_TYPE (t); otype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); if (!cpt_same_type (otype, dtype)) { /* &array is allowed to produce a pointer to the element, rather than a pointer to the array type. We must allow this in order to properly represent assigning the address of an array in C into pointer to the element type. */ gcc_assert (TREE_CODE (otype) == ARRAY_TYPE && POINTER_TYPE_P (ptype) && cpt_same_type (TREE_TYPE (otype), dtype)); break; } break; default: return NULL_TREE; } return NULL_TREE; } #endif /* Gimplify the body of statements pointed to by BODY_P. FNDECL is the function decl containing BODY. */ void gimplify_body (tree *body_p, tree fndecl, bool do_parms) { location_t saved_location = input_location; tree body, parm_stmts; timevar_push (TV_TREE_GIMPLIFY); gcc_assert (gimplify_ctxp == NULL); push_gimplify_context (); /* Unshare most shared trees in the body and in that of any nested functions. 
It would seem we don't have to do this for nested functions because they are supposed to be output and then the outer function gimplified first, but the g++ front end doesn't always do it that way. */ unshare_body (body_p, fndecl); unvisit_body (body_p, fndecl); /* Make sure input_location isn't set to something wierd. */ input_location = DECL_SOURCE_LOCATION (fndecl); /* Resolve callee-copies. This has to be done before processing the body so that DECL_VALUE_EXPR gets processed correctly. */ parm_stmts = do_parms ? gimplify_parameters () : NULL; /* Gimplify the function's body. */ gimplify_stmt (body_p); body = *body_p; if (!body) body = alloc_stmt_list (); else if (TREE_CODE (body) == STATEMENT_LIST) { tree t = expr_only (*body_p); if (t) body = t; } /* If there isn't an outer BIND_EXPR, add one. */ if (TREE_CODE (body) != BIND_EXPR) { tree b = build3 (BIND_EXPR, void_type_node, NULL_TREE, NULL_TREE, NULL_TREE); TREE_SIDE_EFFECTS (b) = 1; append_to_statement_list_force (body, &BIND_EXPR_BODY (b)); body = b; } /* If we had callee-copies statements, insert them at the beginning of the function. */ if (parm_stmts) { append_to_statement_list_force (BIND_EXPR_BODY (body), &parm_stmts); BIND_EXPR_BODY (body) = parm_stmts; } /* Unshare again, in case gimplification was sloppy. */ unshare_all_trees (body); *body_p = body; pop_gimplify_context (body); gcc_assert (gimplify_ctxp == NULL); #ifdef ENABLE_CHECKING walk_tree (body_p, check_pointer_types_r, NULL, NULL); #endif timevar_pop (TV_TREE_GIMPLIFY); input_location = saved_location; } /* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL node for the function we want to gimplify. 
*/

void
gimplify_function_tree (tree fndecl)
{
  tree oldfn, parm, ret;

  /* Switch the compiler's "current function" context to FNDECL for the
     duration of gimplification; restored at the bottom.  */
  oldfn = current_function_decl;
  current_function_decl = fndecl;
  cfun = DECL_STRUCT_FUNCTION (fndecl);
  if (cfun == NULL)
    allocate_struct_function (fndecl);

  for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = TREE_CHAIN (parm))
    {
      /* Preliminarily mark non-addressed complex variables as eligible
         for promotion to gimple registers.  We'll transform their uses
         as we find them.  */
      if (TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
          && !TREE_THIS_VOLATILE (parm)
          && !needs_to_live_in_memory (parm))
        DECL_COMPLEX_GIMPLE_REG_P (parm) = 1;
    }

  /* Same promotion marking for the function's return value.  */
  ret = DECL_RESULT (fndecl);
  if (TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
      && !needs_to_live_in_memory (ret))
    DECL_COMPLEX_GIMPLE_REG_P (ret) = 1;

  /* Gimplify the saved body in place; `true' requests parameter
     (callee-copy) processing as well.  */
  gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl, true);

  /* If we're instrumenting function entry/exit, then prepend the call to
     the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
     catch the exit hook.  */
  /* ??? Add some way to ignore exceptions for this TFE.  */
  if (flag_instrument_function_entry_exit
      && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl))
    {
      tree tf, x, bind;

      /* TRY side: the original body; FINALLY side: the exit-profiling
         hook, so it runs on every exit path.  */
      tf = build2 (TRY_FINALLY_EXPR, void_type_node, NULL, NULL);
      TREE_SIDE_EFFECTS (tf) = 1;
      x = DECL_SAVED_TREE (fndecl);
      append_to_statement_list (x, &TREE_OPERAND (tf, 0));
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT];
      x = build_function_call_expr (x, NULL);
      append_to_statement_list (x, &TREE_OPERAND (tf, 1));

      /* New outermost BIND_EXPR: entry hook first, then the wrapped body.  */
      bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
      TREE_SIDE_EFFECTS (bind) = 1;
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER];
      x = build_function_call_expr (x, NULL);
      append_to_statement_list (x, &BIND_EXPR_BODY (bind));
      append_to_statement_list (tf, &BIND_EXPR_BODY (bind));
      DECL_SAVED_TREE (fndecl) = bind;
    }

  /* Restore the previous function context.  */
  current_function_decl = oldfn;
  cfun = oldfn ? DECL_STRUCT_FUNCTION (oldfn) : NULL;
}

/* Expands EXPR to list of gimple statements STMTS.
   If SIMPLE is true, force the result to be either ssa_name or an invariant,
   otherwise just force it to be a rhs expression.  If VAR is not NULL, make the
   base variable of the final destination be VAR if suitable.  */

tree
force_gimple_operand (tree expr, tree *stmts, bool simple, tree var)
{
  tree t;
  enum gimplify_status ret;
  gimple_predicate gimple_test_f;

  *stmts = NULL_TREE;

  /* Fast path: already a valid gimple value, nothing to emit.  */
  if (is_gimple_val (expr))
    return expr;

  /* Choose the acceptance predicate per the SIMPLE flag.  */
  gimple_test_f = simple ? is_gimple_val : is_gimple_reg_rhs;

  push_gimplify_context ();
  gimplify_ctxp->into_ssa = in_ssa_p;

  /* If a destination variable was supplied, gimplify an assignment to it
     instead of the bare expression.  */
  if (var)
    expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr);

  if (TREE_CODE (expr) != MODIFY_EXPR
      && TREE_TYPE (expr) == void_type_node)
    {
      /* Void-valued expression: emit it purely for side effects and
         return no value.  */
      gimplify_and_add (expr, stmts);
      expr = NULL_TREE;
    }
  else
    {
      ret = gimplify_expr (&expr, stmts, NULL,
                           gimple_test_f, fb_rvalue);
      gcc_assert (ret != GS_ERROR);
    }

  /* Register any temporaries the gimplifier created so later passes
     see them as referenced variables.  */
  if (referenced_vars)
    {
      for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
        add_referenced_var (t);
    }

  pop_gimplify_context (NULL);

  return expr;
}

/* Invokes force_gimple_operand for EXPR with parameters SIMPLE_P and VAR.  If
   some statements are produced, emits them before BSI.  */

tree
force_gimple_operand_bsi (block_stmt_iterator *bsi, tree expr,
                          bool simple_p, tree var)
{
  tree stmts;

  expr = force_gimple_operand (expr, &stmts, simple_p, var);

  /* Any statements generated while simplifying EXPR are inserted
     immediately before the iterator's current statement.  */
  if (stmts)
    bsi_insert_before (bsi, stmts, BSI_SAME_STMT);

  return expr;
}

#include "gt-gimplify.h"
QED_AEG.h
#pragma once
#include "Constants.h"
#include "Ensemble.h"
#include "Grid.h"
#include "AnalyticalField.h"
#include "Pusher.h"
#include "synchrotron.h"

#include <omp.h>
#include <random>

using namespace constants;

namespace pfc
{
    // Monte-Carlo QED event generator / particle pusher: advances electrons,
    // positrons and photons, stochastically emitting photons from charged
    // particles and converting photons into e-/e+ pairs.  When the per-step
    // event probability is large, it switches to a sub-stepped "avalanche"
    // mode (cascade) for the affected particle.
    // NOTE(review): despite the "only_electron" name, positrons are handled
    // identically to electrons — presumably the name refers to the seeding
    // scheme; confirm against project docs.
    template <class TGrid> // may be AnalyticalField or any Grid type
    class ScalarQED_AEG_only_electron : public ParticlePusher
    {
    public:

        // Sets rate thresholds, the Schwinger-field normalization, the
        // dimensionful rate prefactor, and per-thread scratch buffers.
        ScalarQED_AEG_only_electron()
        {
            // Events with estimated per-step probability below MinProbability
            // are subsampled (with compensating weight Factor); above
            // MaxProbability the avalanche (sub-stepped) path is taken.
            MinProbability = 5e-4;
            MaxProbability = 0.01;
            // (m c^2)^2 / (|e| hbar c) in the code's unit system; electron
            // charge is negative, hence the leading minus.
            SchwingerField = sqr(Constants<FP>::electronMass() * Constants<FP>::lightVelocity())
                * Constants<FP>::lightVelocity() / (-Constants<FP>::electronCharge() * Constants<FP>::planck());
            // e^2 m c / hbar^2 — common prefactor of all event rates.
            preFactor = sqr(Constants<FP>::electronCharge()) * Constants<FP>::electronMass()
                * Constants<FP>::lightVelocity() / sqr(Constants<FP>::planck());
            // Process switches: pair production is disabled by default
            // (coeffPair_probability == 0), photon emission enabled.
            coeffPhoton_probability = 1.0;
            coeffPair_probability = 0.0;
            distribution = std::uniform_real_distribution<FP>(0.0, 1.0);
            int max_threads;
#ifdef __USE_OMP__
            max_threads = omp_get_max_threads();
#else
            max_threads = 1;
#endif
            // One scratch vector per OpenMP thread so the parallel loops
            // below can push particles without locking.
            AvalanchePhotons.resize(max_threads);
            AvalancheParticles.resize(max_threads);
            afterAvalanchePhotons.resize(max_threads);
            afterAvalancheParticles.resize(max_threads);
        }

        // Top-level per-timestep driver: clears per-thread buffers, processes
        // each species (skipping work when the corresponding process is
        // switched off), then serially merges all newly created particles
        // into the ensemble.
        void processParticles(Ensemble3d* particles, TGrid* grid, FP timeStep)
        {
            int max_threads;
#ifdef __USE_OMP__
            max_threads = omp_get_max_threads();
#else
            max_threads = 1;
#endif
            for (int th = 0; th < max_threads; th++)
            {
                AvalanchePhotons[th].clear();
                AvalancheParticles[th].clear();
                afterAvalanchePhotons[th].clear();
                afterAvalancheParticles[th].clear();
            }

            if ((*particles)[Photon].size() && coeffPair_probability != 0)
                HandlePhotons((*particles)[Photon], grid, timeStep);
            if ((*particles)[Electron].size() && coeffPhoton_probability != 0)
                HandleParticles((*particles)[Electron], grid, timeStep);
            if ((*particles)[Positron].size() && coeffPhoton_probability != 0)
                HandleParticles((*particles)[Positron], grid, timeStep);

            // Serial merge of per-thread output buffers into the ensemble.
            for (int th = 0; th < max_threads; th++)
            {
                for (int ind = 0; ind < afterAvalanchePhotons[th].size(); ind++)
                {
                    particles->addParticle(afterAvalanchePhotons[th][ind]);
                }
                for (int ind = 0; ind < afterAvalancheParticles[th].size(); ind++)
                {
                    particles->addParticle(afterAvalancheParticles[th][ind]);
                }
            }
        }

        // Boris-style momentum/position update for one particle over
        // timeStep: half electric kick, magnetic rotation, half electric
        // kick, then position advance with the updated velocity.
        void Boris(Particle3d&& particle, const FP3& e, const FP3& b, FP timeStep)
        {
            FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass() * Constants<FP>::lightVelocity());
            FP3 eMomentum = e * eCoeff;
            FP3 um = particle.getP() + eMomentum;
            FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
            FP3 uprime = um + cross(um, t);
            FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
            particle.setP(eMomentum + um + cross(uprime, s));
            particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
        }

        // Identical Boris update for the proxy particle type.
        void Boris(ParticleProxy3d&& particle, const FP3& e, const FP3& b, FP timeStep)
        {
            FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass() * Constants<FP>::lightVelocity());
            FP3 eMomentum = e * eCoeff;
            FP3 um = particle.getP() + eMomentum;
            FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
            FP3 uprime = um + cross(um, t);
            FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
            particle.setP(eMomentum + um + cross(uprime, s));
            particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
        }

        // Advances photons ballistically and stochastically converts them
        // into e-/e+ pairs; high-rate photons seed an avalanche instead.
        void HandlePhotons(ParticleArray3d& particles, TGrid* grid, FP timeStep)
        {
            FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
            for (int i = 0; i < particles.size(); i++)
            {
                int thread_id;
#ifdef __USE_OMP__
                thread_id = omp_get_thread_num();
#else
                thread_id = 0;
#endif
                FP3 pPos = particles[i].getPosition();
                FP3 k = particles[i].getVelocity();
                FP3 e, b;
                e = grid->getE(pPos);
                b = grid->getB(pPos);
                k = (1 / k.norm()) * k; // normalized wave vector
                // Free propagation at c along the wave vector.
                particles[i].setPosition(pPos + dt * Constants<FP>::lightVelocity() * k);
                // Effective transverse field seen by the photon.
                FP H_eff = sqrt(sqr(e + VP(k, b)) - sqr(SP(e, k)));
                FP HE = H_eff / SchwingerField;
                // Photon "gamma" = energy in units of m c^2 (p / (m c)).
                FP pGamma = particles[i].getMomentum().norm() / (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
                FP EstimatedProbability = dt * estimatedPhotons(HE, pGamma);
                FP Factor = 1;
                if (EstimatedProbability < MinProbability)
                {
                    // Rare event: sample only a fraction of candidates, and
                    // boost the accepted ones by Factor to stay unbiased.
                    FP r0 = random_number_omp();
                    if (r0 > EstimatedProbability / MinProbability)
                        continue;
                    else
                        Factor = MinProbability / EstimatedProbability;
                }
                if (EstimatedProbability < MaxProbability)
                {
                    //=======handle single event========
                    double gamma = pGamma;
                    double chi = gamma * H_eff / SchwingerField;
                    // delta = fraction of the photon momentum given to the
                    // electron; 0 means no pair was produced this step.
                    double delta = Pair_Generator(Factor, chi, gamma, dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Electron);
                        NewParticle.setWeight(particles[i].getWeight());
                        NewParticle.setPosition(particles[i].getPosition());
                        NewParticle.setMomentum(delta * particles[i].getMomentum());
                        afterAvalancheParticles[thread_id].push_back(NewParticle);
                        NewParticle.setType(Positron);
                        NewParticle.setMomentum((1 - delta) * particles[i].getMomentum());
                        afterAvalancheParticles[thread_id].push_back(NewParticle);
                        //deletePhoton
                        // NOTE(review): the converted photon is not removed
                        // here — the "deletePhoton" marker suggests deletion
                        // is expected elsewhere; confirm with the caller.
                    }
                }
                else
                {
                    //=======handle avalanche========
                    AvalancheParticles[thread_id].clear();
                    AvalanchePhotons[thread_id].clear();
                    AvalanchePhotons[thread_id].push_back(particles[i]);
                    // Undo the ballistic advance above: the avalanche loop
                    // re-propagates the photon in sub-steps itself.
                    particles[i].setPosition(particles[i].getPosition() - dt * Constants<FP>::lightVelocity() * k); // go back
                    RunAvalanche(H_eff, e, b, Photon, pGamma, dt);
                    //deletePhoton
                    for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
                        afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
                    for (int k = 0; k != AvalancheParticles[thread_id].size(); k++)
                        afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
                }
            }
        }

        // Pushes charged particles (Boris) and stochastically emits photons;
        // high-rate particles are handed to the avalanche path.
        void HandleParticles(ParticleArray3d& particles, TGrid* grid, FP timeStep)
        {
            FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
            for (int i = 0; i < particles.size(); i++)
            {
                int thread_id;
#ifdef __USE_OMP__
                thread_id = omp_get_thread_num();
#else
                thread_id = 0;
#endif
                FP3 pPos = particles[i].getPosition();
                FP3 v = particles[i].getVelocity();
                FP3 e, b;
                e = grid->getE(pPos);
                b = grid->getB(pPos);
                // Effective field for a massive particle; clamp tiny negative
                // values from floating-point cancellation before sqrt.
                FP H_eff = sqr(e + (1 / Constants<FP>::lightVelocity()) * VP(v, b)) - sqr(SP(e, v) / Constants<FP>::lightVelocity());
                if (H_eff < 0) H_eff = 0;
                H_eff = sqrt(H_eff);
                FP pGamma = particles[i].getGamma();
                FP HE = H_eff / SchwingerField;
                FP EstimatedProbability = dt * estimatedParticles(HE, pGamma);
                FP Factor = 1;
                if (EstimatedProbability < MinProbability)
                {
                    // Rare event: subsample; rejected particles still get a
                    // normal Boris push before moving on.
                    FP r0 = random_number_omp();
                    if (r0 > EstimatedProbability / MinProbability)
                    {
                        Boris(particles[i], e, b, dt);
                        continue;
                    }
                    else
                        Factor = MinProbability / EstimatedProbability;
                }
                if (EstimatedProbability < MaxProbability)
                {
                    //=======handle single event========
                    double gamma = pGamma;
                    double chi = gamma * H_eff / SchwingerField;
                    // delta = fraction of the particle momentum carried away
                    // by the emitted photon; 0 means no emission.
                    double delta = Photon_MGenerator(Factor, chi, gamma, dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Photon);
                        NewParticle.setWeight(particles[i].getWeight());
                        NewParticle.setPosition(particles[i].getPosition());
                        NewParticle.setMomentum(delta * particles[i].getMomentum());
                        afterAvalanchePhotons[thread_id].push_back(NewParticle);
                        // Recoil: emitting particle keeps the remainder.
                        particles[i].setMomentum((1 - delta) * particles[i].getMomentum());
                    }
                    Boris(particles[i], e, b, dt);
                }
                else
                {
                    //=======handle avalanche========
                    AvalancheParticles[thread_id].clear();
                    AvalanchePhotons[thread_id].clear();
                    AvalancheParticles[thread_id].push_back(particles[i]);
                    RunAvalanche(H_eff, e, b, particles[i].getType(), pGamma, dt);
                    for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
                        afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
                    // Slot 0 holds the (updated) seed particle; write it back
                    // in place, the rest are new particles to append.
                    particles[i].setMomentum(AvalancheParticles[thread_id][0].getMomentum());
                    particles[i].setPosition(AvalancheParticles[thread_id][0].getPosition());
                    for (int k = 1; k != AvalancheParticles[thread_id].size(); k++)
                        afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
                }
            }
        }

        // Sub-stepped cascade: alternately pushes/emits from all charged
        // particles and propagates/converts all photons in this thread's
        // avalanche buffers, using a sub-step sized so each event probability
        // stays at MaxProbability.  Fields E and B are held fixed (values at
        // the seed position) for the whole avalanche.
        void RunAvalanche(double H_eff_global, const FP3& E, const FP3& B, int SeedType, double gamma, double dt)
        {
            int thread_id;
#ifdef __USE_OMP__
            thread_id = omp_get_thread_num();
#else
            thread_id = 0;
#endif
            vector<Particle3d>& AvalancheParticles = this->AvalancheParticles[thread_id];
            vector<Particle3d>& AvalanchePhotons = this->AvalanchePhotons[thread_id];
            gamma = max(gamma, 1.0);
            FP HE = H_eff_global / SchwingerField;
            // Choose sub_dt so the per-sub-step probability is MaxProbability,
            // then round to an integer number of sub-steps covering dt.
            FP sub_dt = MaxProbability / estimatedParticles(HE, gamma);
            int NT = 1 + int(dt / sub_dt);
            sub_dt = dt / FP(NT);
            for (int i = 0; i != NT; i++)
            {
                // Charged-particle pass: push and possibly emit a photon.
                // New photons pushed here are processed by the photon pass
                // below in the same sub-step.
                for (int k = 0; k != AvalancheParticles.size(); k++)
                {
                    Boris(AvalancheParticles[k], E, B, sub_dt);
                    FP3 v = AvalancheParticles[k].getVelocity();
                    FP H_eff = sqr(E + (1 / Constants<FP>::lightVelocity()) * VP(v, B)) - sqr(SP(E, v) / Constants<FP>::lightVelocity());
                    if (H_eff < 0) H_eff = 0;
                    H_eff = sqrt(H_eff);
                    FP gamma = AvalancheParticles[k].getGamma();
                    FP chi = gamma * H_eff / SchwingerField;
                    FP delta = Photon_MGenerator(1, chi, gamma, sub_dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Photon);
                        NewParticle.setWeight(AvalancheParticles[k].getWeight());
                        NewParticle.setPosition(AvalancheParticles[k].getPosition());
                        NewParticle.setMomentum(delta * AvalancheParticles[k].getMomentum());
                        AvalanchePhotons.push_back(NewParticle);
                        AvalancheParticles[k].setMomentum((1 - delta) * AvalancheParticles[k].getMomentum());
                    }
                }
                // Photon pass: propagate and possibly convert to a pair.
                // Converted photons are removed by swap-with-last + pop_back;
                // k-- re-examines the swapped-in element.
                for (int k = 0; k < AvalanchePhotons.size(); k++)
                {
                    FP3 k_ = AvalanchePhotons[k].getVelocity();
                    k_ = (1 / k_.norm()) * k_; // normalized wave vector
                    AvalanchePhotons[k].setPosition(AvalanchePhotons[k].getPosition() + sub_dt * Constants<FP>::lightVelocity() * k_);
                    FP H_eff = sqrt(sqr(E + VP(k_, B)) - sqr(SP(E, k_)));
                    FP gamma = AvalanchePhotons[k].getMomentum().norm() / (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
                    FP chi = gamma * H_eff / SchwingerField;
                    FP delta = Pair_Generator(1, chi, gamma, sub_dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Electron);
                        NewParticle.setWeight(AvalanchePhotons[k].getWeight());
                        NewParticle.setPosition(AvalanchePhotons[k].getPosition());
                        NewParticle.setMomentum(delta * AvalanchePhotons[k].getMomentum());
                        AvalancheParticles.push_back(NewParticle);
                        NewParticle.setType(Positron);
                        NewParticle.setMomentum((1 - delta) * AvalanchePhotons[k].getMomentum());
                        AvalancheParticles.push_back(NewParticle);
                        AvalanchePhotons[k] = AvalanchePhotons[AvalanchePhotons.size() - 1];
                        AvalanchePhotons.pop_back();
                        k--;
                    }
                }
            }
        }

        // Upper-bound estimate of the photon pair-conversion rate (per unit
        // time, before the preFactor-normalized dt multiplication).
        // gamma is accepted for signature symmetry but unused here.
        FP estimatedPhotons(FP HE, FP gamma)
        {
            return (0.0827 * HE) * preFactor;
        }

        // Upper-bound estimate of the charged-particle emission rate; the
        // piecewise-power-law coefficients appear to be fitted constants —
        // source of the fit not visible here, do not alter.
        FP estimatedParticles(FP HE, FP gamma)
        {
            FP b = 3.0 / 2.0 * HE * gamma;
            FP newFactor;
            if (b < 0.1) {
                newFactor = 0.962436 * b / gamma + 0.0827 * HE;
            }
            else if (b < 0.5) {
                newFactor = 0.779009 * pow(b, 11.0 / 12.0) / gamma + 0.0827 * HE;
            }
            else if (b < 10) {
                newFactor = 0.721193 * pow(b, 19.0 / 24.0) / gamma + 0.0827 * HE;
            }
            else {
                newFactor = 0.955556 * pow(b, 2.0 / 3.0) / gamma + 0.0827 * HE;
            }
            return newFactor * preFactor;
        }

        // Differential photon-emission probability at energy fraction d,
        // built from the synchrotron functions; the z < 700 guard avoids
        // overflow in the exponential tails.
        FP Photon_probability(FP chi, FP gamma, FP d)
        {
            FP z = (2 / 3.0) * (1 / chi) * d / (1 - d);
            FP coeff = (sqrt(3.0) / (2.0 * pi)) * coeffPhoton_probability;
            if ((z < 700) && (z > 0))
                return coeff * (chi / gamma) * ((1 - d) / d) * (synchrotron_1(z) + (3 / 2.0) * d * chi * z * synchrotron_2(z));
            else return 0;
        }

        // Differential pair-production probability at electron energy
        // fraction d.  NOTE(review): the (d - 1) * d factor is negative for
        // d in (0,1); presumably the bracketed synchrotron combination is
        // also negative so the product is positive — verify against the
        // source formula before touching.
        FP Pair_probability(FP chi, FP gamma, FP d)
        {
            FP z_p = (2 / 3.0) / (chi * (1 - d) * d);
            FP coeff = (sqrt(3.0) / (2.0 * pi)) * coeffPair_probability;
            if ((z_p < 700) && (z_p > 0))
                return coeff * (chi / gamma) * (d - 1) * d * (synchrotron_1(z_p) - (3 / 2.0) * chi * z_p * synchrotron_2(z_p));
            else return 0;
        }

        // Rejection sampler for pair production: returns the electron's
        // momentum fraction on acceptance, 0 otherwise.
        FP Pair_Generator(FP Factor, FP chi, FP gamma, FP dt) //returns photon energy in mc2gamma in case of generation.
        {
            FP factor = Factor * dt * preFactor;
            FP r1 = random_number_omp();
            FP r2 = random_number_omp();
            if (r2 < factor * Pair_probability(chi, gamma, r1))
                return r1;
            else return 0;
        }

        // Rejection sampler for photon emission with importance sampling:
        // r1 = r0^3 concentrates samples at small energy fractions; the
        // acceptance weight carries the Jacobian 3 r0^2.
        FP Photon_MGenerator(FP Factor, FP chi, FP gamma, FP dt) //Modified event generator: returns photon energy in mc2gamma in case of generation, !doesn't change gamma
        {
            double r0 = random_number_omp();
            double r1 = r0 * r0 * r0;
            double r2 = random_number_omp();
            double factor = Factor * dt * preFactor;
            if (r2 < factor * Photon_probability(chi, gamma, r1) * 3 * r0 * r0)
                return r1;
            else return 0;
        }

        // Per-particle ParticlePusher interface: intentionally a no-op for
        // the proxy overload; the Particle3d overload forwards to it.
        void operator()(ParticleProxy3d* particle, ValueField field, FP timeStep) {}
        void operator()(Particle3d* particle, ValueField field, FP timeStep)
        {
            ParticleProxy3d particleProxy(*particle);
            this->operator()(&particleProxy, field, timeStep);
        }

    private:

        // Serialized uniform [0,1) draw; the critical section protects the
        // single shared engine, so this is a contention point in the
        // parallel loops above.
        FP random_number_omp()
        {
            FP rand_n;
#pragma omp critical
            rand_n = distribution(rand_generator);
            return rand_n;
        }

        FP MinProbability, MaxProbability;   // subsampling / avalanche thresholds
        FP SchwingerField;                   // critical-field normalization
        FP preFactor;                        // common dimensionful rate prefactor
        FP coeffPhoton_probability, coeffPair_probability; // process on/off scales
        std::default_random_engine rand_generator;
        std::uniform_real_distribution<FP> distribution;
        // Per-thread working buffers (index = OpenMP thread id).
        vector<vector<Particle3d>> AvalanchePhotons, AvalancheParticles;
        vector<vector<Particle3d>> afterAvalanchePhotons, afterAvalancheParticles;
    };

    typedef ScalarQED_AEG_only_electron<YeeGrid> ScalarQED_AEG_only_electron_Yee;
    typedef ScalarQED_AEG_only_electron<PSTDGrid> ScalarQED_AEG_only_electron_PSTD;
    typedef ScalarQED_AEG_only_electron<PSATDGrid> ScalarQED_AEG_only_electron_PSATD;
    typedef ScalarQED_AEG_only_electron<AnalyticalField> ScalarQED_AEG_only_electron_Analytical;
}
DRB114-if-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
When if() evalutes to true, this program has data races due to true dependence within the loop at 65. Data race pair: a[i+1]@66:5 vs. a[i]@66:12
*/

#include <stdlib.h>
#include <stdio.h>
#include <time.h>

/* Carries a true (read-after-write) loop dependence:
 * iteration i writes a[i+1], which iteration i+1 then reads.
 * This is the seeded defect this DataRaceBench case exists to expose,
 * so the body must stay exactly as written. */
void task(int *a, int i)
{
  a[i+1]=a[i]+1;
}

int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[100];

  /* Race-free initialization: each iteration touches only its own a[i]. */
#pragma omp parallel for private(i )
  for (i=0;i<len;i++)
    a[i]=i;

  /* NOTE(review): rand() is never called in the code visible here, so this
   * srand() has no observable effect.  The "if-orig-yes" variant of DRB114
   * normally guards a parallel loop with if(rand()%2); the pragma on the
   * loop below may have been lost in extraction -- verify against the
   * upstream DataRaceBench source. */
  srand(time(NULL));

  /* Dependent loop: as shown (serial) it cannot race; under the benchmark's
   * intended "parallel for if(...)" it races when the if() evaluates true. */
  for (i=0;i<len-1;i++)
    task(&a[0], i);

  printf("a[50]=%d\n", a[50]);
  return 0;
}
rose_cancellation_point.c
#include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "libxomp.h" struct OUT__1__10852___data { void *iend_p; void *ist_p; } ; static void OUT__1__10852__(void *__out_argv); void foo(int iend,int ist) { int i; struct OUT__1__10852___data __out_argv1__10852__; __out_argv1__10852__ . ist_p = ((void *)(&ist)); __out_argv1__10852__ . iend_p = ((void *)(&iend)); XOMP_parallel_start(OUT__1__10852__,&__out_argv1__10852__,1,0,"/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/cancellation_point.c",8); XOMP_parallel_end("/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/cancellation_point.c",18); } static void OUT__1__10852__(void *__out_argv) { int *iend = (int *)(((struct OUT__1__10852___data *)__out_argv) -> iend_p); int *ist = (int *)(((struct OUT__1__10852___data *)__out_argv) -> ist_p); if (XOMP_single()) { printf("Using %d threads.\n",(omp_get_num_threads())); } XOMP_barrier(); { int _p_i; long p_index_; long p_lower_; long p_upper_; XOMP_loop_default( *iend, *ist,-1,&p_lower_,&p_upper_); for (p_index_ = p_lower_; p_index_ >= p_upper_; p_index_ += -1) { printf("Iteration %d is carried out by thread %d\n",p_index_,(omp_get_thread_num())); } } #pragma omp cancellation point parallel }
pr60823-1.c
/* PR tree-optimization/60823 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp-simd" } */ #pragma omp declare simd simdlen(4) notinbranch int foo (const double c1, const double c2) { double z1 = c1, z2 = c2; int res = 100, i; for (i = 0; i < 100; i++) { res = (z1 * z1 + z2 * z2 > 4.0) ? (i < res ? i : res) : res; z1 = c1 + z1 * z1 - z2 * z2; z2 = c2 + 2.0 * z1 * z2; } return res; }
aux_interp.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#include "hypre_hopscotch_hash.h"

/*---------------------------------------------------------------------------
 * Auxilary routines for the long range interpolation methods.
 * Implemented: "standard", "extended", "multipass", "FF"
 *--------------------------------------------------------------------------*/

/* AHB 11/06: Modification of the above original - takes two communication
   packages and inserts nodes to position expected for OUT_marker

   offd nodes from comm_pkg take up first chunk of CF_marker_offd, offd
   nodes from extend_comm_pkg take up the second chunk of CF_marker_offd. */

/* Exchange IN_marker values through BOTH communication packages: the
 * comm_pkg receives fill the first part of OUT_marker, the extend_comm_pkg
 * receives are written starting at recv_vec_starts[num_recvs].  One send
 * buffer, sized for the larger of the two packages, is reused for both
 * exchanges.  (full_off_procNodes is not read here; kept for interface
 * symmetry with callers.) */
HYPRE_Int hypre_alt_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                                     hypre_ParCSRCommPkg *extend_comm_pkg,
                                     HYPRE_Int *IN_marker,
                                     HYPRE_Int full_off_procNodes,
                                     HYPRE_Int *OUT_marker)
{
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int i, index, shift;

   HYPRE_Int num_sends, num_recvs;
   HYPRE_Int *recv_vec_starts;

   HYPRE_Int e_num_sends;

   HYPRE_Int *int_buf_data;
   HYPRE_Int *e_out_marker;

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

   e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

   /* Buffer sized for the larger of the two send maps so it can be reused. */
   index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                     hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

   int_buf_data = hypre_CTAlloc(HYPRE_Int, index, HYPRE_MEMORY_HOST);

   /* orig commpkg data*/
   index = 0;

   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   /* Tag 11 = integer exchange; Create starts it, Destroy waits for it. */
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               OUT_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /* now do the extend commpkg */

   /* first we need to shift our position in the OUT_marker */
   shift = recv_vec_starts[num_recvs];
   e_out_marker = OUT_marker + shift;

   index = 0;

   begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
   end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, extend_comm_pkg, int_buf_data,
                                               e_out_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Same two-package exchange as hypre_alt_insert_new_nodes, but sends
 * offset + IN_marker[.] as HYPRE_BigInt values (comm tag 21). */
HYPRE_Int hypre_big_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                                     hypre_ParCSRCommPkg *extend_comm_pkg,
                                     HYPRE_Int *IN_marker,
                                     HYPRE_Int full_off_procNodes,
                                     HYPRE_BigInt offset,
                                     HYPRE_BigInt *OUT_marker)
{
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int i, index, shift;

   HYPRE_Int num_sends, num_recvs;
   HYPRE_Int *recv_vec_starts;

   HYPRE_Int e_num_sends;

   HYPRE_BigInt *int_buf_data;
   HYPRE_BigInt *e_out_marker;

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

   e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

   index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                     hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

   int_buf_data = hypre_CTAlloc(HYPRE_BigInt, index, HYPRE_MEMORY_HOST);

   /* orig commpkg data*/
   index = 0;

   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] = offset + (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   /* Tag 21 = HYPRE_BigInt exchange. */
   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, int_buf_data,
                                               OUT_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /* now do the extend commpkg */

   /* first we need to shift our position in the OUT_marker */
   shift = recv_vec_starts[num_recvs];
   e_out_marker = OUT_marker + shift;

   index = 0;

   begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
   end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] = offset + (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, extend_comm_pkg, int_buf_data,
                                               e_out_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* sort for non-ordered arrays */
/* Selection sort of data[0..n-1].  Each pass moves the element chosen by
 * hypre_index_of_minimum to position i (from the back); see the note on
 * that helper -- the net effect is ascending order.  Returns 1 if any
 * swap occurred, 0 otherwise. */
HYPRE_Int hypre_ssort(HYPRE_BigInt *data, HYPRE_Int n)
{
   HYPRE_Int i,si;
   HYPRE_Int change = 0;

   if(n > 0)
      for(i = n-1; i > 0; i--){
         si = hypre_index_of_minimum(data,i+1);
         if(i != si)
         {
            hypre_swap_int(data, i, si);
            change = 1;
         }
      }
   return change;
}

/* Auxilary function for hypre_ssort */
/* NOTE(review): despite the name, the `<` comparison below makes `answer`
 * track the index of the LARGEST element in data[0..n-1].  hypre_ssort
 * swaps that element to the tail of each shrinking prefix, which yields an
 * ascending sort overall -- so the pair is functionally consistent, only
 * the name is misleading.  Confirm before renaming or "fixing". */
HYPRE_Int hypre_index_of_minimum(HYPRE_BigInt *data, HYPRE_Int n)
{
   HYPRE_Int answer;
   HYPRE_Int i;

   answer = 0;
   for(i = 1; i < n; i++)
      if(data[answer] < data[i])
         answer = i;

   return answer;
}

/* Swap data[a] and data[b]. */
void hypre_swap_int(HYPRE_BigInt *data, HYPRE_Int a, HYPRE_Int b)
{
   HYPRE_BigInt temp;

   temp = data[a];
   data[a] = data[b];
   data[b] = temp;

   return;
}

/* Initialize CF_marker_offd, CF_marker, P_marker, P_marker_offd, tmp */
/* Fill diag_ftc[0..diag_n), offd_ftc/tmp_CF[0..offd_n), and (when non-NULL)
 * diag_pm/offd_pm with -1.  The branch on offd_n < diag_n only decides how
 * the index range is split so the shared prefix is written in one loop. */
void hypre_initialize_vecs(HYPRE_Int diag_n, HYPRE_Int offd_n, HYPRE_Int *diag_ftc, HYPRE_BigInt *offd_ftc,
                           HYPRE_Int *diag_pm, HYPRE_Int *offd_pm, HYPRE_Int *tmp_CF)
{
   HYPRE_Int i;

   /* Quicker initialization */
   if(offd_n < diag_n)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < offd_n; i++)
      {
         diag_ftc[i] = -1;
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1; }
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = offd_n; i < diag_n; i++)
      {
         diag_ftc[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1; }
      }
   }
   else
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < diag_n; i++)
      {
         diag_ftc[i] = -1;
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1;}
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = diag_n; i < offd_n; i++)
      {
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
   }
   return;
}

/* Find nodes that are offd and are not contained in original offd
 * (neighbors of neighbors) */
/* Scans A_ext/Sop columns of rows whose CF_marker_offd is negative.  Global
 * column indices outside [col_1, col_n) that are already in col_map_offd are
 * rewritten in place as negative codes (-pos-1); the remainder are collected
 * (sorted, deduplicated) into *found and encoded as
 * -(position + num_cols_A_offd) - 1.  Returns the number of new nodes.
 * Two builds: concurrent hopscotch hash map/set, or serial binary search. */
static HYPRE_Int hypre_new_offd_nodes(HYPRE_BigInt **found, HYPRE_Int num_cols_A_offd,
                                      HYPRE_Int *A_ext_i, HYPRE_BigInt *A_ext_j,
                                      HYPRE_Int num_cols_S_offd, HYPRE_BigInt *col_map_offd, HYPRE_BigInt col_1,
                                      HYPRE_BigInt col_n, HYPRE_Int *Sop_i, HYPRE_BigInt *Sop_j,
                                      HYPRE_Int *CF_marker_offd)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
   HYPRE_BigInt big_i1, big_k1;
   HYPRE_Int i, j, kk;
   HYPRE_Int got_loc, loc_col;
   /*HYPRE_Int min;*/
   HYPRE_Int newoff = 0;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Inverse map: global column id -> local position in col_map_offd. */
   hypre_UnorderedBigIntMap col_map_offd_inverse;
   hypre_UnorderedBigIntMapCreate(&col_map_offd_inverse, 2*num_cols_A_offd, 16*hypre_NumThreads());

#pragma omp parallel for HYPRE_SMP_SCHEDULE
   for (i = 0; i < num_cols_A_offd; i++)
   {
      hypre_UnorderedBigIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i);
   }

   /* Find nodes that will be added to the off diag list */
   HYPRE_Int size_offP = A_ext_i[num_cols_A_offd];
   hypre_UnorderedBigIntSet set;
   hypre_UnorderedBigIntSetCreate(&set, size_offP, 16*hypre_NumThreads());

#pragma omp parallel private(i,j,big_i1)
   {
#pragma omp for HYPRE_SMP_SCHEDULE
      for (i = 0; i < num_cols_A_offd; i++)
      {
         if (CF_marker_offd[i] < 0)
         {
            for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
            {
               big_i1 = A_ext_j[j];
               if(big_i1 < col_1 || big_i1 >= col_n)
               {
                  if (!hypre_UnorderedBigIntSetContains(&set, big_i1))
                  {
                     HYPRE_Int k = hypre_UnorderedBigIntMapGet(&col_map_offd_inverse, big_i1);
                     if (-1 == k)
                     {
                        /* Truly new off-processor node: remember it. */
                        hypre_UnorderedBigIntSetPut(&set, big_i1);
                     }
                     else
                     {
                        /* Already known: encode its local position in place. */
                        A_ext_j[j] = -k - 1;
                     }
                  }
               }
            }
            for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
            {
               big_i1 = Sop_j[j];
               if(big_i1 < col_1 || big_i1 >= col_n)
               {
                  if (!hypre_UnorderedBigIntSetContains(&set, big_i1))
                  {
                     Sop_j[j] = -hypre_UnorderedBigIntMapGet(&col_map_offd_inverse, big_i1) - 1;
                  }
               }
            }
         } /* CF_marker_offd[i] < 0 */
      } /* for each row */
   } /* omp parallel */

   hypre_UnorderedBigIntMapDestroy(&col_map_offd_inverse);
   HYPRE_BigInt *tmp_found = hypre_UnorderedBigIntSetCopyToArray(&set, &newoff);
   hypre_UnorderedBigIntSetDestroy(&set);

   /* Put found in monotone increasing order */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
   hypre_UnorderedBigIntMap tmp_found_inverse;
   if (newoff > 0)
   {
      hypre_big_sort_and_create_inverse_map(tmp_found, newoff, &tmp_found, &tmp_found_inverse);
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif

   /* Set column indices for Sop and A_ext such that offd nodes are
    * negatively indexed */
#pragma omp parallel for private(kk,big_k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE
   for(i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
         {
            big_k1 = Sop_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);
               loc_col = got_loc + num_cols_A_offd;
               Sop_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
         for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
         {
            big_k1 = A_ext_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);
               loc_col = got_loc + num_cols_A_offd;
               A_ext_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
      }
   }
   if (newoff)
   {
      hypre_UnorderedBigIntMapDestroy(&tmp_found_inverse);
   }
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
   HYPRE_Int size_offP;

   HYPRE_BigInt *tmp_found;
   HYPRE_Int min;
   HYPRE_Int ifound;

   size_offP = A_ext_i[num_cols_A_offd]+Sop_i[num_cols_A_offd];
   tmp_found = hypre_CTAlloc(HYPRE_BigInt, size_offP, HYPRE_MEMORY_HOST);

   /* Find nodes that will be added to the off diag list */
   for (i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
         {
            big_i1 = A_ext_j[j];
            if(big_i1 < col_1 || big_i1 >= col_n)
            {
               ifound = hypre_BigBinarySearch(col_map_offd,big_i1,num_cols_A_offd);
               if(ifound == -1)
               {
                  tmp_found[newoff]=big_i1;
                  newoff++;
               }
               else
               {
                  A_ext_j[j] = (HYPRE_BigInt)(-ifound-1);
               }
            }
         }
         for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
         {
            big_i1 = Sop_j[j];
            if(big_i1 < col_1 || big_i1 >= col_n)
            {
               ifound = hypre_BigBinarySearch(col_map_offd,big_i1,num_cols_A_offd);
               if(ifound == -1)
               {
                  tmp_found[newoff]=big_i1;
                  newoff++;
               }
               else
               {
                  Sop_j[j] = (HYPRE_BigInt)(-ifound-1);
               }
            }
         }
      }
   }
   /* Put found in monotone increasing order */
   if (newoff > 0)
   {
      hypre_BigQsort0(tmp_found,0,newoff-1);
      /* In-place deduplication of the sorted list. */
      ifound = tmp_found[0];
      min = 1;
      for (i=1; i < newoff; i++)
      {
         if (tmp_found[i] > ifound)
         {
            ifound = tmp_found[i];
            tmp_found[min++] = ifound;
         }
      }
      newoff = min;
   }

   /* Set column indices for Sop and A_ext such that offd nodes are
    * negatively indexed */
   for(i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
         {
            big_k1 = Sop_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_BigBinarySearch(tmp_found,big_k1,newoff);
               /* NOTE(review): this unbraced `if` means that when got_loc
                * is -1 (entry not found) loc_col keeps a stale value -- or
                * is indeterminate on the first miss -- yet is still written
                * into Sop_j.  Presumably every remaining entry is expected
                * to be found here; verify that invariant. */
               if(got_loc > -1)
                  loc_col = got_loc + num_cols_A_offd;
               Sop_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
         for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
         {
            big_k1 = A_ext_j[kk];
            if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
            {
               got_loc = hypre_BigBinarySearch(tmp_found,big_k1,newoff);
               loc_col = got_loc + num_cols_A_offd;
               A_ext_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
            }
         }
      }
   }
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */

   *found = tmp_found;
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
   return newoff;
}

/* One-package marker exchange: gather IN_marker over comm_pkg's send map
 * and receive into OUT_marker (tag 11). */
HYPRE_Int hypre_exchange_marker(hypre_ParCSRCommPkg *comm_pkg,
                                HYPRE_Int *IN_marker,
                                HYPRE_Int *OUT_marker)
{
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   HYPRE_Int *int_buf_data = hypre_CTAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST);

   HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i)
   {
      int_buf_data[i - begin] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   hypre_ParCSRCommHandle *comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                                       OUT_marker);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Gather everything long-range interpolation needs about off-processor
 * points: CF marker and (optionally) dof_func for all off-proc nodes, the
 * external rows of A and S (A_ext, Sop), the count of all off-proc nodes
 * including neighbors-of-neighbors (*full_off_procNodes), and a new comm
 * package (*extend_comm_pkg) covering the newly discovered nodes. */
HYPRE_Int hypre_exchange_interp_data(
   HYPRE_Int **CF_marker_offd,
   HYPRE_Int **dof_func_offd,
   hypre_CSRMatrix **A_ext,
   HYPRE_Int *full_off_procNodes,
   hypre_CSRMatrix **Sop,
   hypre_ParCSRCommPkg **extend_comm_pkg,
   hypre_ParCSRMatrix *A,
   HYPRE_Int *CF_marker,
   hypre_ParCSRMatrix *S,
   HYPRE_Int num_functions,
   HYPRE_Int *dof_func,
   HYPRE_Int skip_fine_or_same_sign) // skip_fine_or_same_sign if we want to skip fine points in S and nnz with the same sign as diagonal in A
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif

   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   /* [col_1, col_n) is this rank's owned global row/column range. */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt *found = NULL;

   /*----------------------------------------------------------------------
    * Get the off processors rows for A and S, associated with columns in
    * A_offd and S_offd.
    *---------------------------------------------------------------------*/
   *CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   hypre_exchange_marker(comm_pkg, CF_marker, *CF_marker_offd);

   hypre_ParCSRCommHandle *comm_handle_a_idx, *comm_handle_a_data;
   *A_ext = hypre_ParCSRMatrixExtractBExt_Overlap(A,A,1,&comm_handle_a_idx,&comm_handle_a_data,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,skip_fine_or_same_sign);
   HYPRE_Int *A_ext_i = hypre_CSRMatrixI(*A_ext);
   HYPRE_BigInt *A_ext_j = hypre_CSRMatrixBigJ(*A_ext);
   HYPRE_Int A_ext_rows = hypre_CSRMatrixNumRows(*A_ext);

   hypre_ParCSRCommHandle *comm_handle_s_idx;
   *Sop = hypre_ParCSRMatrixExtractBExt_Overlap(S,A,0,&comm_handle_s_idx,NULL,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,0);
   HYPRE_Int *Sop_i = hypre_CSRMatrixI(*Sop);
   HYPRE_BigInt *Sop_j = hypre_CSRMatrixBigJ(*Sop);
   HYPRE_Int Soprows = hypre_CSRMatrixNumRows(*Sop);

   /* Finish the overlapped index exchanges and release their send buffers. */
   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_s_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_s_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   send_idx = (HYPRE_Int *)comm_handle_a_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_a_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   /* Find nodes that are neighbors of neighbors, not found in offd */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
   HYPRE_Int newoff = hypre_new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
                                           Soprows, col_map_offd, col_1, col_n,
                                           Sop_i, Sop_j, *CF_marker_offd);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
   /* NOTE(review): hypre_new_offd_nodes only ever increments newoff from 0,
    * so the else branch looks unreachable; note also that this early return
    * would skip destroying comm_handle_a_data and freeing its send buffer.
    * Verify before relying on this path. */
   if(newoff >= 0)
      *full_off_procNodes = newoff + num_cols_A_offd;
   else
   {
      return hypre_error_flag;
   }

   /* Possibly add new points and new processors to the comm_pkg, all
    * processors need new_comm_pkg */

   /* AHB - create a new comm package just for extended info -
      this will work better with the assumed partition*/
   hypre_ParCSRFindExtendCommPkg(A, newoff, found, extend_comm_pkg);

   *CF_marker_offd = hypre_TReAlloc(*CF_marker_offd, HYPRE_Int, *full_off_procNodes, HYPRE_MEMORY_HOST);
   /* Second chunk of CF_marker_offd (after the A_ext_rows receives) holds
    * the markers for the newly discovered nodes. */
   hypre_exchange_marker(*extend_comm_pkg, CF_marker, *CF_marker_offd + A_ext_rows);

   if(num_functions > 1)
   {
      if (*full_off_procNodes > 0)
         *dof_func_offd = hypre_CTAlloc(HYPRE_Int, *full_off_procNodes, HYPRE_MEMORY_HOST);

      hypre_alt_insert_new_nodes(comm_pkg, *extend_comm_pkg, dof_func,
                                 *full_off_procNodes, *dof_func_offd);
   }

   hypre_TFree(found, HYPRE_MEMORY_HOST);

   HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_a_data->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_a_data);
   hypre_TFree(send_data, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/* Compress P's off-diagonal block: keep only the off-proc columns actually
 * referenced by P_offd_j (and marked >= 0 in tmp_CF_marker_offd), build the
 * sorted col_map_offd for P from fine_to_coarse_offd, and renumber P_offd_j
 * into the compressed local index space. */
void hypre_build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes,
                               HYPRE_Int *tmp_CF_marker_offd, HYPRE_BigInt *fine_to_coarse_offd)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
   HYPRE_Int i, index;

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P->diag);

   HYPRE_Int P_offd_size = P->offd->i[n_fine];
   HYPRE_Int *P_offd_j = P->offd->j;
   HYPRE_BigInt *col_map_offd_P = NULL;

   HYPRE_Int *P_marker = NULL;

   if (full_off_procNodes)
      P_marker = hypre_TAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < full_off_procNodes; i++)
      P_marker[i] = 0;

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if
    * tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the
    * total number of times P_marker is set */
#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE
   for (i=0; i < P_offd_size; i++)
   {
      index = P_offd_j[i];
      if(tmp_CF_marker_offd[index] >= 0)
      {
         P_marker[index] = 1;
      }
   }

   /* VLA sized by the thread count; used by hypre_prefix_sum below. */
   HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];
   HYPRE_Int num_cols_P_offd = 0;
#pragma omp parallel private(i)
   {
      HYPRE_Int i_begin, i_end;
      hypre_GetSimpleThreadPartition(&i_begin, &i_end, full_off_procNodes);

      HYPRE_Int local_num_cols_P_offd = 0;
      for (i = i_begin; i < i_end; i++)
      {
         if (P_marker[i] == 1) local_num_cols_P_offd++;
      }

      /* After this call, local_num_cols_P_offd is each thread's starting
       * offset and num_cols_P_offd the global total. */
      hypre_prefix_sum(&local_num_cols_P_offd, &num_cols_P_offd, prefix_sum_workspace);

#pragma omp master
      {
         if (num_cols_P_offd)
            col_map_offd_P = hypre_TAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      }
#pragma omp barrier

      for (i = i_begin; i < i_end; i++)
      {
         if (P_marker[i] == 1)
         {
            col_map_offd_P[local_num_cols_P_offd++] = fine_to_coarse_offd[i];
         }
      }
   }

   hypre_UnorderedBigIntMap col_map_offd_P_inverse;
   hypre_big_sort_and_create_inverse_map(col_map_offd_P, num_cols_P_offd, &col_map_offd_P, &col_map_offd_P_inverse);

   // find old idx -> new idx map
#pragma omp parallel for
   for (i = 0; i < full_off_procNodes; i++)
      P_marker[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]);

   if (num_cols_P_offd)
   {
      hypre_UnorderedBigIntMapDestroy(&col_map_offd_P_inverse);
   }
#pragma omp parallel for
   for(i = 0; i < P_offd_size; i++)
      P_offd_j[i] = P_marker[P_offd_j[i]];

#else /* HYPRE_CONCURRENT_HOPSCOTCH */

   HYPRE_Int num_cols_P_offd = 0;
   HYPRE_Int j;
   for (i=0; i < P_offd_size; i++)
   {
      index = P_offd_j[i];
      if (!P_marker[index])
      {
         if(tmp_CF_marker_offd[index] >= 0)
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
   }

   if (num_cols_P_offd)
   {
      HYPRE_Int *tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      HYPRE_BigInt *tmp_marker = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[k] = k-th marked old index, in increasing order. */
      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while( P_marker[index] == 0) index++;
         tmp_map_offd[i] = index++;
      }
      for(i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);

      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         col_map_offd_P[i] = fine_to_coarse_offd[index];
         index++;
      }

      /* Sort the col_map_offd_P and P_offd_j correctly */
      for(i = 0; i < num_cols_P_offd; i++)
         tmp_marker[i] = col_map_offd_P[i];

      /* Check if sort actually changed anything */
      if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
      {
         for(i = 0; i < P_offd_size; i++)
            for(j = 0; j < num_cols_P_offd; j++)
               if(tmp_marker[P_offd_j[i]] == col_map_offd_P[j])
               {
                  P_offd_j[i] = j;
                  j = num_cols_P_offd;
               }
      }
      hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   }
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */

   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P->offd) = num_cols_P_offd;
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
}
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Numbers of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. 
MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {} /// Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Set the associated statement for the directive. /// /// /param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// Iterates over expressions/statements used in the construct. 
class used_clauses_child_iterator : public llvm::iterator_adaptor_base< used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator, std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> { ArrayRef<OMPClause *>::iterator End; OMPClause::child_iterator ChildI, ChildEnd; void MoveToNext() { if (ChildI != ChildEnd) return; while (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); if (ChildI != ChildEnd) return; } } } public: explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses) : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); MoveToNext(); } } Stmt *operator*() const { return *ChildI; } Stmt *operator->() const { return **this; } used_clauses_child_iterator &operator++() { ++ChildI; if (ChildI != ChildEnd) return *this; if (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); } } MoveToNext(); return *this; } }; static llvm::iterator_range<used_clauses_child_iterator> used_clauses_children(ArrayRef<OMPClause *> Clauses) { return {used_clauses_child_iterator(Clauses), used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))}; } /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. 
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    // Skip over clauses that are not of type SpecificClause.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  /// Range of all clauses in \p Clauses of type SpecificClause.
  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Range of this directive's own clauses of type SpecificClause.
  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (an
  /// assertion fires if more than one such clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    auto Clauses = getClausesOfKind<SpecificClause>();

    if (Clauses.begin() != Clauses.end()) {
      assert(std::next(Clauses.begin()) == Clauses.end() &&
             "There are at least 2 clauses of the specified kind");
      return *Clauses.begin();
    }
    return nullptr;
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }

  /// Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }

  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return *child_begin();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() && "no associated statement.");
    return *child_begin();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    // Combined directives nest one CapturedStmt per capture region; walk the
    // nesting until the requested region is reached.
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(std::any_of(
               CaptureRegions.begin(), CaptureRegions.end(),
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() && getAssociatedStmt() &&
           "Must have associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    // Peel one CapturedStmt per capture region except the last.
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }

  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!hasAssociatedStmt())
      return child_range(child_iterator(), child_iterator());
    // Child storage begins immediately after the clause storage.
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    /// Do not mark all the special expression/statements as children, except
    /// for the associated statement.
    return child_range(ChildStorage, ChildStorage + 1);
  }

  const_child_range children() const {
    if (!hasAssociatedStmt())
      return const_child_range(const_child_iterator(), const_child_iterator());
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(
        const_cast<OMPExecutableDirective *>(this)->getClauses().end());
    return const_child_range(ChildStorage, ChildStorage + 1);
  }

  ArrayRef<OMPClause *> clauses() { return getClauses(); }

  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;

  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive,
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const;

  Stmt *getStructuredBlock() {
    return const_cast<Stmt *>(
        const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock());
  }
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// true if the construct has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
/// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. 
  /// The first 9 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 11 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, three arrays of length CollapsedNum are
  /// allocated: loop counters, their updates and final values.
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    InitOffset = 6,
    IncOffset = 7,
    PreInitsOffset = 8,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals arrays).
    DefaultEnd = 9,
    // The following 8 exprs are used by worksharing and distribute loops only.
    IsLastIterVariableOffset = 9,
    LowerBoundVariableOffset = 10,
    UpperBoundVariableOffset = 11,
    StrideVariableOffset = 12,
    EnsureUpperBoundOffset = 13,
    NextLowerBoundOffset = 14,
    NextUpperBoundOffset = 15,
    NumIterationsOffset = 16,
    // Offset to the end for worksharing loop directives.
    WorksharingEnd = 17,
    PrevLowerBoundVariableOffset = 17,
    PrevUpperBoundVariableOffset = 18,
    DistIncOffset = 19,
    PrevEnsureUpperBoundOffset = 20,
    CombinedLowerBoundVariableOffset = 21,
    CombinedUpperBoundVariableOffset = 22,
    CombinedEnsureUpperBoundOffset = 23,
    CombinedInitOffset = 24,
    CombinedConditionOffset = 25,
    CombinedNextLowerBoundOffset = 26,
    CombinedNextUpperBoundOffset = 27,
    CombinedDistConditionOffset = 28,
    CombinedParForInDistConditionOffset = 29,
    // Offset to the end (and start of the following counters/updates/finals
    // arrays) for combined distribute loop directives.
    CombinedDistributeEnd = 30,
  };

  /// Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
        child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the inits storage.
  MutableArrayRef<Expr *> getInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

protected:
  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
/// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. 
  /// Total number of children for a loop directive: the fixed offsets plus
  /// five CollapsedNum-sized trailing arrays.
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
                                                     // PrivateCounters, Inits,
                                                     // Updates and Finals
  }

  // The setters below store each helper expression at its fixed child offset.
  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  void setPreInits(Stmt *PreInits) {
    *std::next(child_begin(), PreInitsOffset) = PreInits;
  }
  // Worksharing/taskloop/distribute-only children.
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), IsLastIterVariableOffset) = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), LowerBoundVariableOffset) = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), UpperBoundVariableOffset) = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), StrideVariableOffset) = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextLowerBoundOffset) = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextUpperBoundOffset) = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NumIterationsOffset) = NI;
  }
  // Children specific to loop-bound-sharing (combined distribute) directives.
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), DistIncOffset) = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedInitOffset) = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedConditionOffset) = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
  }
  void setCombinedDistCond(Expr *CombDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond;
  }
  void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(), CombinedParForInDistConditionOffset) =
        CombParForInDistCond;
  }
  /// Fill the CollapsedNum-sized trailing arrays (see numLoopChildren).
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);

public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g. pragma omp distribute parallel for)
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    ///  with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    ///  with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct when schedule is chunked.
    Expr *DistCond;
    /// 'omp parallel for' loop condition used when composed with
    /// 'omp distribute' in the same construct and when schedule is
    /// chunked and the chunk size is 1.
    Expr *ParForInDistCond;
  };

  /// The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// Loop iteration variable.
    Expr *IterationVarRef;
    /// Loop last iteration number.
    Expr *LastIteration;
    /// Loop number of iterations.
    Expr *NumIterations;
    /// Calculation of last iteration.
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// Init statement for all captured expressions.
    Stmt *PreInits;

    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;

    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// Initialize all the fields to null.
    /// \param Size Number of elements in the counters/finals/updates arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      for (unsigned i = 0; i < Size; ++i) {
        Counters[i] = nullptr;
        PrivateCounters[i] = nullptr;
        Inits[i] = nullptr;
        Updates[i] = nullptr;
        Finals[i] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };

  /// Get number of collapsed loops.
  unsigned getCollapsedNumber() const { return CollapsedNum; }

  // The getters below read the helper expressions back from the fixed child
  // offsets; the const_cast/reinterpret_cast pair mirrors the Stmt* storage.
  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  Expr *getCond() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
  }
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  const Stmt *getPreInits() const {
    return *std::next(child_begin(), PreInitsOffset);
  }
  Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
  // Worksharing/taskloop/distribute-only children.
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IsLastIterVariableOffset)));
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LowerBoundVariableOffset)));
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), UpperBoundVariableOffset)));
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), StrideVariableOffset)));
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), EnsureUpperBoundOffset)));
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextLowerBoundOffset)));
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextUpperBoundOffset)));
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NumIterationsOffset)));
  }
  // Children specific to loop-bound-sharing (combined distribute) directives.
  Expr *getPrevLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevLowerBoundVariableOffset)));
  }
  Expr *getPrevUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevUpperBoundVariableOffset)));
  }
  Expr *getDistInc() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), DistIncOffset)));
  }
  Expr *getPrevEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
  }
  Expr *getCombinedLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
  }
  Expr *getCombinedUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
  }
  Expr *getCombinedEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
  }
  Expr *getCombinedInit() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedInitOffset)));
  }
  Expr *getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedConditionOffset)));
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextLowerBoundOffset)));
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextUpperBoundOffset)));
  }
  Expr *getCombinedDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedDistConditionOffset)));
  }
  Expr *getCombinedParForInDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedParForInDistConditionOffset)));
  }
  const Stmt *getBody() const {
    // This relies on the loop form being already checked by Sema.
const Stmt *Body = getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); Body = cast<ForStmt>(Body)->getBody(); for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) { Body = Body->IgnoreContainers(); Body = cast<ForStmt>(Body)->getBody(); } return Body; } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == 
OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. 
/// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. 
/// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, StartLoc, EndLoc, 0, 1), HasCancel(false) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, SourceLocation(), SourceLocation(), 0, 1), HasCancel(false) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. 
/// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, StartLoc, EndLoc, 0, 1) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. 
/// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. 
/// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. 
/// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses. /// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 2) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return static_cast<const Expr *>(*std::next(child_begin(), 1)); } Expr *getReductionRef() { return static_cast<Expr *>(*std::next(child_begin(), 1)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. 
/// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. 
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  /// One child slot is allocated for the associated statement.
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPOrderedDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};

/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;
  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  /// Five child slots are allocated; the setters/getters below use slots
  /// 1..4 for 'x', the update expression, 'v' and 'expr' respectively
  /// (slot 0 presumably holds the associated statement).
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};

/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  /// One child slot is allocated for the associated statement.
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};

/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, StartLoc, EndLoc, NumClauses,
                               1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};

/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};

/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetExitDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if current region has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp cancellation point' directive.
/// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, OMPD_cancellation_point, StartLoc, EndLoc, 0, 0), CancelRegion(OMPD_unknown) {} /// Build an empty directive. /// explicit OMPCancellationPointDirective() : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, OMPD_cancellation_point, SourceLocation(), SourceLocation(), 0, 0), CancelRegion(OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. 
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Kind of the region being cancelled (e.g. OMPD_for);
  /// OMPD_unknown until setCancelRegion() is called.
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  /// Stand-alone directive: no child statements are allocated (last arg 0).
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses, 0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};

/// This represents '#pragma omp taskloop' directive.
/// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. /// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. 
bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp distribute parallel for simd' has /// clause 'private' with the variables 'x' /// class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. 
/// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, SourceLocation(),SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. /// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. 
/// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. 
/// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
bug_proxy_task_dep_waiting.c
// RUN: %libomp-compile-and-run

// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc

// Very flaky on openmp-clang-x86_64-linux-debian.
// https://bugs.llvm.org/show_bug.cgi?id=45397
// UNSUPPORTED: linux

#include <stdio.h>
#include <omp.h>
#include <pthread.h>
#include "omp_my_sleep.h"

/*
 An explicit task can have a dependency on a target task. If it is not
 directly satisfied, the runtime should not wait but resume execution.

 The test emulates the code a compiler would generate for a proxy
 ("target nowait") task: it allocates the task via the libomp entry
 points directly and completes it asynchronously from a plain pthread.
*/

// Compiler-generated code (emulation).  These typedefs mirror the runtime's
// internal ABI (see kmp.h); they must stay layout-compatible with libomp.
typedef long kmp_intptr_t;
typedef int kmp_int32;
// 1-byte bool matching the runtime's layout for the dependence flag bitfield.
typedef char bool;

typedef struct ident {
    kmp_int32 reserved_1;   /**< might be used in Fortran; see above */
    kmp_int32 flags;        /**< also f.flags; KMP_IDENT_xxx flags;
                                 KMP_IDENT_KMPC identifies this union member */
    kmp_int32 reserved_2;   /**< not really used in Fortran any more; see above */
#if USE_ITT_BUILD
    /* but currently used for storing region-specific ITT */
    /* contextual information. */
#endif /* USE_ITT_BUILD */
    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for C++ */
    char const *psource;    /**< String describing the source location.
                                 The string is composed of semi-colon separated
                                 fields which describe the source file, the
                                 function and a pair of line numbers that
                                 delimit the construct. */
} ident_t;

// One dependence entry as passed to __kmpc_omp_task_with_deps.
typedef struct kmp_depend_info {
     kmp_intptr_t base_addr;  // address identifying the dependence object
     size_t len;              // size of the dependence range, in bytes
     struct {
         bool in:1;           // depend(in: ...)
         bool out:1;          // depend(out: ...)
     } flags;
} kmp_depend_info_t;

struct kmp_task;
typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * );

// Leading part of the runtime's task descriptor; only the fields the test
// needs are mirrored here.
typedef struct kmp_task {                   /* GEH: Shouldn't this be aligned somehow? */
    void *              shareds;            /**< pointer to block of pointers to shared vars */
    kmp_routine_entry_t routine;            /**< pointer to routine to call for executing task */
    kmp_int32           part_id;            /**< part id for the task */
} kmp_task_t;

#ifdef __cplusplus
extern "C" {
#endif
// libomp entry points normally emitted by the compiler.
kmp_int32  __kmpc_global_thread_num ( ident_t * );
kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags,
               size_t sizeof_kmp_task_t, size_t sizeof_shareds,
               kmp_routine_entry_t task_entry );
void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask );
kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task,
               kmp_int32 ndeps, kmp_depend_info_t *dep_list,
               kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list );
kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task );
#ifdef __cplusplus
}
#endif

// Runs on a plain pthread: emulates the asynchronous "target" work and then
// signals out-of-order completion of the proxy task to the runtime.
void *target(void *task)
{
    my_sleep( 0.1 );
    __kmpc_proxy_task_completed_ooo((kmp_task_t*) task);
    return NULL;
}

pthread_t target_thread;

// User's code
// Task entry for the proxy task: launches the asynchronous "target" work.
int task_entry(kmp_int32 gtid, kmp_task_t *task)
{
    pthread_create(&target_thread, NULL, &target, task);
    return 0;
}

int main()
{
    int dep;

   /*
    *  Corresponds to:
    *    #pragma omp target nowait depend(out: dep)
    *    {
    *        my_sleep( 0.1 );
    *    }
    */
    kmp_depend_info_t dep_info;
    dep_info.base_addr = (long) &dep;
    dep_info.len = sizeof(int);
    // out = inout per spec and runtime expects this
    dep_info.flags.in = 1;
    dep_info.flags.out = 1;

    kmp_int32 gtid = __kmpc_global_thread_num(NULL);
    // NOTE(review): flags=17 (0x11) looks like "tied | proxy" in
    // kmp_tasking_flags_t bit layout — confirm against kmp.h if changed.
    kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry);
    __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL);

    // Both tasks below depend on the (not yet satisfied) proxy task, so
    // neither may have executed before the taskwait.
    int first_task_finished = 0;
    #pragma omp task shared(first_task_finished) depend(inout: dep)
    {
        first_task_finished = 1;
    }

    int second_task_finished = 0;
    #pragma omp task shared(second_task_finished) depend(in: dep)
    {
        second_task_finished = 1;
    }

    // check that execution has been resumed and the runtime has not waited
    // for the dependencies to be satisfied (a task already finished here
    // means the runtime blocked on the unsatisfied dependence).
    int error = (first_task_finished == 1);
    error += (second_task_finished == 1);

    #pragma omp taskwait

    // by now all tasks should have finished
    error += (first_task_finished != 1);
    error += (second_task_finished != 1);

    return error;
}
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/color-private.h" #include "MagickCore/cache.h" #include "MagickCore/draw.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/resample.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/signature-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/option.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/
/*
  State needed to resample one image: the image and cache view being
  sampled, the caller's interpolation/virtual-pixel settings, and the
  parameters of the current elliptical sampling area.
*/
struct _ResampleFilter
{
  CacheView
    *view;              /* virtual-pixel view of 'image' */

  Image
    *image;             /* reference-counted source image */

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;

  PixelInterpolateMethod
    interpolate;

  VirtualPixelMethod
    virtual_pixel;

  FilterType
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,      /* scaling limits hit; fall back to averaged color */
    do_interpolate,     /* skip EWA and use direct point interpolation */
    average_defined;    /* average_pixel has been computed (lazily) */

  PixelInfo
    average_pixel;

  /* current elliptical area being resampled around center point;
     A,B,C are the coefficients of the ellipse Q = A*u^2 + B*u*v + C*v^2 */
  double
    A, B, C,
    Vlimit, Ulimit, Uwidth,
    slope;

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResampleFilter() initializes the information resample needs to do a
%  scaled lookup of a color from an image, using area sampling.
%
%  The algorithm is based on a Elliptical Weighted Average, where the pixels
%  found in a large elliptical area is averaged together according to a
%  weighting (filter) function.  For more details see "Fundamentals of Texture
%  Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
%  1989.  Available for free from, http://www.cs.cmu.edu/~ph/
%
%  As EWA resampling (or any sort of resampling) can require a lot of
%  calculations to produce a distorted scaling of the source image for each
%  output pixel, the ResampleFilter structure generated holds that information
%  between individual image resampling.
%
%  This function will make the appropriate AcquireCacheView() calls
%  to view the image, calling functions do not need to open a cache view.
%
%  Usage Example...
%      resample_filter=AcquireResampleFilter(image,exception);
%      SetResampleFilter(resample_filter, GaussianFilter);
%      for (y=0; y < (ssize_t) image->rows; y++) {
%        for (x=0; x < (ssize_t) image->columns; x++) {
%          u= ....;   v= ....;
%          ScaleResampleFilter(resample_filter, ... scaling vectors ...);
%          (void) ResamplePixelColor(resample_filter,u,v,&pixel);
%          ... assign resampled pixel value ...
%        }
%      }
%      DestroyResampleFilter(resample_filter);
%
%  The format of the AcquireResampleFilter method is:
%
%     ResampleFilter *AcquireResampleFilter(const Image *image,
%       ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* AcquireCriticalMemory() aborts on allocation failure, so no NULL check
     is needed; zero the whole structure before filling it in. */
  resample_filter=(ResampleFilter *) AcquireCriticalMemory(sizeof(
    *resample_filter));
  (void) memset(resample_filter,0,sizeof(*resample_filter));

  resample_filter->exception=exception;
  /* Take a reference on the image; released in DestroyResampleFilter(). */
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
    exception);
  resample_filter->debug=IsEventLogging();
  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined=MagickFalse;

  resample_filter->signature=MagickCoreSignature;

  /* Inherit the image's filter, interpolation and virtual pixel settings. */
  SetResampleFilter(resample_filter,image->filter);
  (void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));

  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResampleFilter() finalizes and cleans up the resampling
%  resample_filter as returned by AcquireResampleFilter(), freeing any memory
%  or other information as needed.
%
%  The format of the DestroyResampleFilter method is:
%
%      ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->view=DestroyCacheView(resample_filter->view);
  /* drop the reference taken in AcquireResampleFilter() */
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* invalidate the signature to catch use-after-destroy */
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e P i x e l C o l o r                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResamplePixelColor() samples the pixel values surrounding the location
%  given using an elliptical weighted average, at the scale previously
%  calculated, and in the most efficient manner possible for the
%  VirtualPixelMethod setting.
%
%  The format of the ResamplePixelColor method is:
%
%     MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
%       const double u0,const double v0,PixelInfo *pixel,
%       ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o u0,v0: A double representing the center of the area to resample,
%        the distortion-transformed x,y coordinate.
%
%    o pixel: the resampled pixel is returned here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const Quantum *pixels;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  status=MagickTrue;
  /* GetPixelInfo(resample_filter->image,pixel); */

  /* Point interpolation was requested; no area resampling needed. */
  if ( resample_filter->do_interpolate ) {
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      resample_filter->interpolate,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does the resample area miss the image proper?  If so, and that area is
    a simple solid color, then simply return that color!  This saves a lot
    of calculation when resampling outside the bounds of the source image.

    However it probably should be expanded to image bounds plus the
    filter's scaled support size.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      /* solid color only when the ellipse is fully past a CORNER */
      if (    ( u0 + resample_filter->Ulimit < 0.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if (    v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if (    u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    case DitherVirtualPixelMethod:
      /* dither reaches 32 pixels beyond each edge */
      if (    ( u0 + resample_filter->Ulimit < -32.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* The area being resampled is simply a solid color,
     * just return a single lookup color.
     *
     * Should this return the user's requested interpolated color?
     */
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

  /*
    When Scaling limits reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /*  This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood
           works well in general, but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - is there a more direct way?
           (-1,-1 is outside the image, so it resolves to the VP color) */
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate an average color of the WHOLE image (computed lazily,
           then cached in resample_filter->average_pixel) */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetPixelInfo(resample_filter->image,(PixelInfo *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireVirtualCacheView(average_image,exception);
          pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const Quantum *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          GetPixelInfoPixel(resample_filter->image,pixels,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is an alpha blend of the image's average pixel
                 color and the current background color */

              /* image's average pixel color */
              weight = QuantumScale*((double)
                resample_filter->average_pixel.alpha);
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              /* background color */
              weight = QuantumScale*((double)
                resample_filter->image->background_color.alpha);
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.alpha +=
                resample_filter->image->background_color.alpha;
              divisor_c += weight;

              /* alpha blend */
              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.alpha /= 2; /* 50% blend */
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = 0.0;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounded by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels, within the scaled ellipse,
    bound by a parallelogram fitted to the ellipse.  The ellipse quotient
    Q is updated incrementally along each scan line (DQ, DDQ are its
    first and second finite differences).
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const Quantum *) NULL)
      return(MagickFalse);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q));    /* a SquareRoot!  Arrggghhhhh... */
#endif

        /* alpha is accumulated with its own divisor; the color channels
           are alpha-weighted (associated alpha) before accumulation */
        pixel->alpha += weight*GetPixelAlpha(resample_filter->image,pixels);
        divisor_m += weight;

        if (pixel->alpha_trait != UndefinedPixelTrait)
          weight *= QuantumScale*((double) GetPixelAlpha(resample_filter->image,pixels));
        pixel->red   += weight*GetPixelRed(resample_filter->image,pixels);
        pixel->green += weight*GetPixelGreen(resample_filter->image,pixels);
        pixel->blue  += weight*GetPixelBlue(resample_filter->image,pixels);
        if (pixel->colorspace == CMYKColorspace)
          pixel->black += weight*GetPixelBlack(resample_filter->image,pixels);
        divisor_c += weight;

        hit++;

#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels+=GetPixelChannels(resample_filter->image);
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
    /* not enough pixels, or bad weighting in resampling,
       resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->alpha = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolatePixelInfo(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finalize results of resampling
  */
  divisor_m = 1.0/divisor_m;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha);
  divisor_c = 1.0/divisor_c;
  pixel->red   = (double) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (double) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue  = (double) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = (double) ClampToQuantum(divisor_c*pixel->black);

  return(MagickTrue);
}

#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
-   C l a m p U p A x e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampUpAxes() function converts the input vectors into a major and
%  minor axis unit vectors, and their magnitude.  This allows us to
%  ensure that the ellipse generated is never smaller than the unit
%  circle and thus never too small for use in EWA resampling.
%
%  This purely mathematical 'magic' was provided by Professor Nicolas
%  Robidoux and his Masters student Chantal Racette.
%
%  Reference: "We Recommend Singular Value Decomposition", David Austin
%    http://www.ams.org/samplings/feature-column/fcarc-svd
%
%  By generating major and minor axis vectors, we can actually use the
%  ellipse in its "canonical form", by remapping the dx,dy of the
%  sampled point into distances along the major and minor axis unit
%  vectors.
% % Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form */ static inline void ClampUpAxes(const double dux, const double dvx, const double duy, const double dvy, double *major_mag, double *minor_mag, double *major_unit_x, double *major_unit_y, double *minor_unit_x, double *minor_unit_y) { /* * ClampUpAxes takes an input 2x2 matrix * * [ a b ] = [ dux duy ] * [ c d ] = [ dvx dvy ] * * and computes from it the major and minor axis vectors [major_x, * major_y] and [minor_x,minor_y] of the smallest ellipse containing * both the unit disk and the ellipse which is the image of the unit * disk by the linear transformation * * [ dux duy ] [S] = [s] * [ dvx dvy ] [T] = [t] * * (The vector [S,T] is the difference between a position in output * space and [X,Y]; the vector [s,t] is the difference between a * position in input space and [x,y].) */ /* * Output: * * major_mag is the half-length of the major axis of the "new" * ellipse. * * minor_mag is the half-length of the minor axis of the "new" * ellipse. * * major_unit_x is the x-coordinate of the major axis direction vector * of both the "old" and "new" ellipses. * * major_unit_y is the y-coordinate of the major axis direction vector. * * minor_unit_x is the x-coordinate of the minor axis direction vector. * * minor_unit_y is the y-coordinate of the minor axis direction vector. * * Unit vectors are useful for computing projections, in particular, * to compute the distance between a point in output space and the * center of a unit disk in output space, using the position of the * corresponding point [s,t] in input space. Following the clamping, * the square of this distance is * * ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2 * + * ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2 * * If such distances will be computed for many [s,t]'s, it makes * sense to actually compute the reciprocal of major_mag and * minor_mag and multiply them by the above unit lengths. 
* * Now, if you want to modify the input pair of tangent vectors so * that it defines the modified ellipse, all you have to do is set * * newdux = major_mag * major_unit_x * newdvx = major_mag * major_unit_y * newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y * newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x * * and use these tangent vectors as if they were the original ones. * Usually, this is a drastic change in the tangent vectors even if * the singular values are not clamped; for example, the minor axis * vector always points in a direction which is 90 degrees * counterclockwise from the direction of the major axis vector. */ /* * Discussion: * * GOAL: Fix things so that the pullback, in input space, of a disk * of radius r in output space is an ellipse which contains, at * least, a disc of radius r. (Make this hold for any r>0.) * * ESSENCE OF THE METHOD: Compute the product of the first two * factors of an SVD of the linear transformation defining the * ellipse and make sure that both its columns have norm at least 1. * Because rotations and reflexions map disks to themselves, it is * not necessary to compute the third (rightmost) factor of the SVD. * * DETAILS: Find the singular values and (unit) left singular * vectors of Jinv, clampling up the singular values to 1, and * multiply the unit left singular vectors by the new singular * values in order to get the minor and major ellipse axis vectors. * * Image resampling context: * * The Jacobian matrix of the transformation at the output point * under consideration is defined as follows: * * Consider the transformation (x,y) -> (X,Y) from input locations * to output locations. (Anthony Thyssen, elsewhere in resample.c, * uses the notation (u,v) -> (x,y).) 
* * The Jacobian matrix of the transformation at (x,y) is equal to * * J = [ A, B ] = [ dX/dx, dX/dy ] * [ C, D ] [ dY/dx, dY/dy ] * * that is, the vector [A,C] is the tangent vector corresponding to * input changes in the horizontal direction, and the vector [B,D] * is the tangent vector corresponding to input changes in the * vertical direction. * * In the context of resampling, it is natural to use the inverse * Jacobian matrix Jinv because resampling is generally performed by * pulling pixel locations in the output image back to locations in * the input image. Jinv is * * Jinv = [ a, b ] = [ dx/dX, dx/dY ] * [ c, d ] [ dy/dX, dy/dY ] * * Note: Jinv can be computed from J with the following matrix * formula: * * Jinv = 1/(A*D-B*C) [ D, -B ] * [ -C, A ] * * What we do is modify Jinv so that it generates an ellipse which * is as close as possible to the original but which contains the * unit disk. This can be accomplished as follows: * * Let * * Jinv = U Sigma V^T * * be an SVD decomposition of Jinv. (The SVD is not unique, but the * final ellipse does not depend on the particular SVD.) * * We could clamp up the entries of the diagonal matrix Sigma so * that they are at least 1, and then set * * Jinv = U newSigma V^T. * * However, we do not need to compute V for the following reason: * V^T is an orthogonal matrix (that is, it represents a combination * of rotations and reflexions) so that it maps the unit circle to * itself. For this reason, the exact value of V does not affect the * final ellipse, and we can choose V to be the identity * matrix. This gives * * Jinv = U newSigma. * * In the end, we return the two diagonal entries of newSigma * together with the two columns of U. */ /* * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette * of Laurentian University with insightful suggestions from Anthony * Thyssen and funding from the National Science and Engineering * Research Council of Canada. 
It is distinguished from its * predecessors by its efficient handling of degenerate cases. * * The idea of clamping up the EWA ellipse's major and minor axes so * that the result contains the reconstruction kernel filter support * is taken from Andreas Gustaffson's Masters thesis "Interactive * Image Warping", Helsinki University of Technology, Faculty of * Information Technology, 59 pages, 1993 (see Section 3.6). * * The use of the SVD to clamp up the singular values of the * Jacobian matrix of the pullback transformation for EWA resampling * is taken from the astrophysicist Craig DeForest. It is * implemented in his PDL::Transform code (PDL = Perl Data * Language). */ const double a = dux; const double b = duy; const double c = dvx; const double d = dvy; /* * n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the * squares of the singular values of Jinv. */ const double aa = a*a; const double bb = b*b; const double cc = c*c; const double dd = d*d; /* * Eigenvectors of n are left singular vectors of Jinv. */ const double n11 = aa+bb; const double n12 = a*c+b*d; const double n21 = n12; const double n22 = cc+dd; const double det = a*d-b*c; const double twice_det = det+det; const double frobenius_squared = n11+n22; const double discriminant = (frobenius_squared+twice_det)*(frobenius_squared-twice_det); /* * In exact arithmetic, discriminant can't be negative. In floating * point, it can, because of the bad conditioning of SVD * decompositions done through the associated normal matrix. */ const double sqrt_discriminant = sqrt(discriminant > 0.0 ? discriminant : 0.0); /* * s1 is the largest singular value of the inverse Jacobian * matrix. In other words, its reciprocal is the smallest singular * value of the Jacobian matrix itself. * If s1 = 0, both singular values are 0, and any orthogonal pair of * left and right factors produces a singular decomposition of Jinv. */ /* * Initially, we only compute the squares of the singular values. 
*/ const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant); /* * s2 the smallest singular value of the inverse Jacobian * matrix. Its reciprocal is the largest singular value of the * Jacobian matrix itself. */ const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant); const double s1s1minusn11 = s1s1-n11; const double s1s1minusn22 = s1s1-n22; /* * u1, the first column of the U factor of a singular decomposition * of Jinv, is a (non-normalized) left singular vector corresponding * to s1. It has entries u11 and u21. We compute u1 from the fact * that it is an eigenvector of n corresponding to the eigenvalue * s1^2. */ const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11; const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22; /* * The following selects the largest row of n-s1^2 I as the one * which is used to find the eigenvector. If both s1^2-n11 and * s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case, * any vector is an eigenvector; in addition, norm below is equal to * zero, and, in exact arithmetic, this is the only case in which * norm = 0. So, setting u1 to the simple but arbitrary vector [1,0] * if norm = 0 safely takes care of all cases. */ const double temp_u11 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 ); const double temp_u21 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 ); const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21); /* * Finalize the entries of first left singular vector (associated * with the largest singular value). */ const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 ); const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 ); /* * Clamp the singular values up to 1. */ *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) ); *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) ); /* * Return the unit major and minor axis direction vectors. 
*/ *major_unit_x = u11; *major_unit_y = u21; *minor_unit_x = -u21; *minor_unit_y = u11; } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleResampleFilter() does all the calculations needed to resample an image % at a specific scale, defined by two scaling vectors. This not using % a orthogonal scaling, but two distorted scaling vectors, to allow the % generation of a angled ellipse. % % As only two deritive scaling vectors are used the center of the ellipse % must be the center of the lookup. That is any curvature that the % distortion may produce is discounted. % % The input vectors are produced by either finding the derivitives of the % distortion function, or the partial derivitives from a distortion mapping. % They do not need to be the orthogonal dx,dy scaling vectors, but can be % calculated from other derivatives. For example you could use dr,da/r % polar coordinate vector scaling vectors % % If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y) % Then the scaling vectors are determined from the deritives... % du/dx, dv/dx and du/dy, dv/dy % If the resulting scaling vectors is othogonally aligned then... % dv/dx = 0 and du/dy = 0 % Producing an othogonally alligned ellipse in source space for the area to % be resampled. % % Note that scaling vectors are different to argument order. Argument order % is the general order the deritives are extracted from the distortion % equations, and not the scaling vectors. As such the middle two vaules % may be swapped from what you expect. Caution is advised. % % WARNING: It is assumed that any SetResampleFilter() method call will % always be performed before the ScaleResampleFilter() method, so that the % size of the ellipse will match the support for the resampling filter being % used. 
%
%  The format of the ScaleResampleFilter method is:
%
%     void ScaleResampleFilter(const ResampleFilter *resample_filter,
%       const double dux,const double duy,const double dvx,const double dvy)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resampling information defining the
%      image being resampled
%
%    o dux,duy,dvx,dvy:
%         The derivatives or scaling vectors defining the EWA ellipse.
%         NOTE: watch the order, which is based on the order derivatives
%         are usually determined from distortion equations (see above).
%         The middle two values may need to be swapped if you are thinking
%         in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif

  /* Find ellipse coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
     with u,v relative to the point around which we are resampling,
     and the given scaling dx,dy vectors in u,v space
        du/dx,dv/dx  and  du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients.
     However when magnifying images, the scaling vectors will be small,
     resulting in an ellipse that is too small to sample properly.  As such
     we need to clamp the major/minor axis to a minimum of 1.0 to prevent
     it getting too small.
  */
#if EWA_CLAMP
  {
    double major_mag, minor_mag, major_x, major_y, minor_x, minor_y;

    /* ClampUpAxes() returns unit axis vectors plus their (>=1) magnitudes;
       scale the vectors back up before deriving the coefficients. */
    ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
      &major_x, &major_y, &minor_x, &minor_y);
    major_x *= major_mag;
    major_y *= major_mag;
    minor_x *= minor_mag;
    minor_y *= minor_mag;
#if DEBUG_ELLIPSE
    (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
      major_x, major_y, minor_x, minor_y);
#endif
    A = major_y*major_y+minor_y*minor_y;
    B = -2.0*(major_x*major_y+minor_x*minor_y);
    C = major_x*major_x+minor_x*minor_x;
    F = major_mag*minor_mag;
    F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /* This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 in
     his thesis, which adds a unit circle to the elliptical area so as to do
     both reconstruction and prefiltering of the pixels in the resampling.
     It also means it is always likely to have at least 4 pixels within the
     area of the ellipse, for weighted averaging.  No scaling will result
     with F == 4.0 and a circle of radius 2.0, and F smaller than this means
     magnification is being used.

     NOTE: This method produces a very blurry result at near unity scale
     while producing perfect results for strong minification and
     magnifications.  However filter support is fixed to 2.0 (no good for
     Windowed Sinc filters).
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information is currently not needed at this time, but may be
     needed later for better limit determination.  It is also good to have
     as a record for future debugging.
  */
  {
    double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;

    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major=MagickMaximumValue;
    else
      Major=sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
      (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /* If one or both of the scaling vectors is impossibly large (producing a
     very large raw F value), we may as well not bother doing any form of
     resampling since resampled area is very large.  In this case some
     alternative means of pixel sampling, such as the average of the whole
     image is needed to get a reasonable result.  Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse to match the filters support
     (that is, multiply F by the square of the support).
     Simpler to just multiply it by the support twice!
  */
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif

  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
       > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse formula to directly index the Filter Lookup Table */
  {
    register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilter() set the resampling filter lookup table based on a
%  specific filter.  Note that the filter is used as a radial filter not as a
%  two pass orthogonally aligned resampling filter.
%
%  The format of the SetResampleFilter method is:
%
%      void SetResampleFilter(ResampleFilter *resample_filter,
%        const FilterType filter)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
%    o filter: the resize filter for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterType filter)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    /* Could not acquire the requested filter: fall back to interpolated
       point sampling rather than failing the whole operation. */
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
      ModuleError, "UnableToSetFilteringValue",
      "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
   */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  {
    register int Q;
    double r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /* Adjust the scaling of the default unit circle.
     This assumes that any real scaling changes will always take place
     AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /* This is old code kept as a reference only.  Basically it generates a
     Gaussian bell curve, with sigma = 0.5 if the support is 2.0

     Create Normal Gaussian 2D Filter Weighted Lookup Table.
     A normal EWA gaussian lookup would use   exp(Q*ALPHA)
     where   Q = distance squared from 0.0 (center) to 1.0 (edge)
     and     ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
     The table is of length 1024, and equates to support radius of 2.0,
     thus needs to be scaled by  ALPHA*4/1024  and any blur factor squared.

     It comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    if (IsStringTrue(GetImageArtifact(resample_filter->image,
        "resample:verbose")) != MagickFalse)
      {
        register int
          Q;
        double
          r_scale;

        /* Debug output of the filter weighting LUT.
           Gnuplot the LUT data, the x scale index has been adjusted:
               plot [0:2][-.2:1] "lut.dat" with lines
           The filter values should be normalized for comparison.
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
          WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
          resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");

        /* Scale radius so the filter LUT covers the full support range */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
            GetMagickPrecision(),sqrt((double)Q)*r_scale,
            GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /* Output the above once only for each image, and each setting
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterInterpolateMethod() sets the resample filter
interpolation
%  method.
%
%  The format of the SetResampleFilterInterpolateMethod method is:
%
%      MagickBooleanType SetResampleFilterInterpolateMethod(
%        ResampleFilter *resample_filter,const InterpolateMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const PixelInterpolateMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* Record the interpolation method used whenever EWA is disabled. */
  resample_filter->interpolate=method;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
%  associated with the specified resample filter.
%
%  The format of the SetResampleFilterVirtualPixelMethod method is:
%
%      MagickBooleanType SetResampleFilterVirtualPixelMethod(
%        ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* Record the method; also push it down to the cache view unless it is
     the undefined (inherit) method. */
  resample_filter->virtual_pixel=method;
  if (method != UndefinedVirtualPixelMethod)
    (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
bricksetup.h
/** * @file * @brief Brick iterator and setup code */ #ifndef BRICK_SETUP_H #define BRICK_SETUP_H #include <vector> #include <typeinfo> #include <initializer_list> #include <algorithm> #include "brick.h" struct RunningTag { }; struct StopTag { }; template<unsigned select> struct TagSelect { static constexpr RunningTag value = RunningTag(); }; template<> struct TagSelect<0> { static constexpr StopTag value = StopTag(); }; template<unsigned dims, unsigned d> inline void init_fill(const std::vector<long> &stride, unsigned *adjlist, unsigned *grid_ptr, unsigned *low, unsigned *high, RunningTag t) { unsigned str = static_power<3, d - 1>::value; init_fill<dims, d - 1>(stride, adjlist, grid_ptr - stride[dims - d], low, high, TagSelect<d - 1>::value); init_fill<dims, d - 1>(stride, adjlist + str, grid_ptr, low, high, TagSelect<d - 1>::value); init_fill<dims, d - 1>(stride, adjlist + str * 2, grid_ptr + stride[dims - d], low, high, TagSelect<d - 1>::value); } template<unsigned dims, unsigned d> inline void init_fill(const std::vector<long> &stride, unsigned *adjlist, unsigned *grid_ptr, unsigned *low, unsigned *high, StopTag t) { if (grid_ptr >= low && grid_ptr < high) *adjlist = *grid_ptr; else *adjlist = 0; } template<unsigned dims, unsigned d> inline void init_iter(const std::vector<long> &dimlist, const std::vector<long> &stride, BrickInfo<dims> &bInfo, unsigned *grid_ptr, unsigned *low, unsigned *high, RunningTag t) { if (dims == d) { #pragma omp parallel for for (long s = 0; s < dimlist[dims - d]; ++s) init_iter<dims, d - 1>(dimlist, stride, bInfo, grid_ptr + s * stride[dims - d], low, high, TagSelect<d - 1>::value); } else { for (long s = 0; s < dimlist[dims - d]; ++s) init_iter<dims, d - 1>(dimlist, stride, bInfo, grid_ptr + s * stride[dims - d], low, high, TagSelect<d - 1>::value); } } template<unsigned dims, unsigned d> inline void init_iter(const std::vector<long> &dimlist, const std::vector<long> &stride, BrickInfo<dims> &bInfo, unsigned *grid_ptr, unsigned 
*low, unsigned *high, StopTag t) { init_fill<dims, dims>(stride, bInfo.adj[*grid_ptr], grid_ptr, low, high, RunningTag()); } template<unsigned dims> BrickInfo<dims> init_grid(unsigned *&grid_ptr, const std::vector<long> &dimlist) { long size = 1; for (const auto a: dimlist) size *= a; grid_ptr = (unsigned *) malloc(size * sizeof(unsigned)); for (unsigned pos = 0; pos < size; ++pos) grid_ptr[pos] = pos; BrickInfo<dims> bInfo(size); long tsize = size; std::vector<long> stride; for (const auto a: dimlist) { size = size / a; stride.push_back(size); } init_iter<dims, dims>(dimlist, stride, bInfo, grid_ptr, grid_ptr, grid_ptr + tsize, RunningTag()); return bInfo; } template<unsigned dims, unsigned d, typename F, typename A> inline void fill(const std::vector<long> &tile, const std::vector<long> &stride, bElem *arr, A a, F f, RunningTag t) { for (long s = 0; s < tile[d - 1]; ++s) fill<dims, d - 1>(tile, stride, arr + s * stride[d - 1], a[s], f, TagSelect<d - 1>::value); } template<unsigned dims, unsigned d, typename F, typename A> inline void fill(const std::vector<long> &tile, const std::vector<long> &stride, bElem *arr, A &a, F f, StopTag t) { f(a, arr); } template<unsigned dims, unsigned d, typename T, typename F> inline void iter(const std::vector<long> &dimlist, const std::vector<long> &tile, const std::vector<long> &strideA, const std::vector<long> &strideB, const std::vector<long> &padding, const std::vector<long> &ghost, T &brick, bElem *arr, unsigned *grid_ptr, F f, RunningTag t) { constexpr unsigned dimp = d - 1; if (dims == d) { #pragma omp parallel for for (long s = ghost[dimp] / tile[dimp]; s < (dimlist[dimp] + ghost[dimp]) / tile[dimp]; ++s) iter<dims, d - 1>(dimlist, tile, strideA, strideB, padding, ghost, brick, arr + (padding[dimp] + s * tile[dimp]) * strideA[dimp], grid_ptr + s * strideB[dimp], f, TagSelect<dimp>::value); } else { for (long s = ghost[dimp] / tile[dimp]; s < (dimlist[dimp] + ghost[dimp]) / tile[dimp]; ++s) iter<dims, d - 1>(dimlist, tile, 
strideA, strideB, padding, ghost, brick, arr + (padding[dimp] + s * tile[dimp]) * strideA[dimp], grid_ptr + s * strideB[dimp], f, TagSelect<dimp>::value); } } template<unsigned dims, unsigned d, typename T, typename F> inline void iter(const std::vector<long> &dimlist, const std::vector<long> &tile, const std::vector<long> &strideA, const std::vector<long> &strideB, const std::vector<long> &padding, const std::vector<long> &ghost, T &brick, bElem *arr, unsigned *grid_ptr, F f, StopTag t) { fill<dims, dims>(tile, strideA, arr, brick[*grid_ptr], f, RunningTag()); } /* * Iterate elements side by side in brick and arrays. * * dimlist: the internal regions, iterated * padding: the padding necessary for arrays, skipped * ghost: the padding for both, skipped * f: F (&bElem, *bElem) -> void */ template<unsigned dims, typename F, typename T, unsigned ... BDims> inline void iter_grid(const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost, bElem *arr, unsigned *grid_ptr, Brick<Dim<BDims...>, T> &brick, F f) { std::vector<long> strideA; std::vector<long> strideB; std::vector<long> tile = {BDims...}; // Arrays are contiguous first std::reverse(tile.begin(), tile.end()); long sizeA = 1; long sizeB = 1; for (long a = 0; a < dimlist.size(); ++a) { strideA.push_back(sizeA); strideB.push_back(sizeB); sizeA *= (dimlist[a] + 2 * (padding[a] + ghost[a])); sizeB *= ((dimlist[a] + 2 * ghost[a]) / tile[a]); } iter<dims, dims>(dimlist, tile, strideA, strideB, padding, ghost, brick, arr, grid_ptr, f, RunningTag()); } /** * @brief Copy values from an array to bricks * @tparam dims number of dimensions * @tparam T type for brick * @param dimlist dimensions, contiguous first * @param padding padding applied to array format (skipped) * @param ghost padding applied to array and brick (skipped) * @param arr array input * @param grid_ptr the grid array contains indices of bricks * @param brick the brick data structure */ template<unsigned dims, typename 
T> inline void copyToBrick(const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost, bElem *arr, unsigned *grid_ptr, T &brick) { auto f = [](bElem &brick, bElem *arr) -> void { brick = *arr; }; iter_grid<dims>(dimlist, padding, ghost, arr, grid_ptr, brick, f); } /** * @brief Copy values from an array to bricks without ghost or padding * @tparam dims * @tparam T * @param dimlist * @param arr * @param grid_ptr * @param brick * * For parameters see copyToBrick(const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost, bElem *arr, unsigned *grid_ptr, T &brick) */ template<unsigned dims, typename T> inline void copyToBrick(const std::vector<long> &dimlist, bElem *arr, unsigned *grid_ptr, T &brick) { std::vector<long> padding(dimlist.size(), 0); std::vector<long> ghost(dimlist.size(), 0); copyToBrick<dims>(dimlist, padding, ghost, arr, grid_ptr, brick); } /** * @brief Copy values from bricks to an array * @tparam dims number of dimensions * @tparam T type for brick * @param dimlist dimensions, contiguous first * @param padding padding applied to array format (skipped) * @param ghost padding applied to array and brick (skipped) * @param arr array input * @param grid_ptr the grid array contains indices of bricks * @param brick the brick data structure */ template<unsigned dims, typename T> inline void copyFromBrick(const std::vector<long> &dimlist, const std::vector<long> &padding, const std::vector<long> &ghost, bElem *arr, unsigned *grid_ptr, T &brick) { auto f = [](bElem &brick, bElem *arr) -> void { *arr = brick; }; iter_grid<dims>(dimlist, padding, ghost, arr, grid_ptr, brick, f); } #endif
omp-taskgroup-single.c
/*
 * Minimal OpenMP test: one thread (via `single`) spawns LEN independent
 * tasks inside a `taskgroup`, which waits for all of them to finish
 * before the parallel region (THREADS threads) ends.
 */
#include <omp.h>
#include <unistd.h>
#include <stdio.h>

#define THREADS 4
#define LEN 25

int main(void)
{
    int j = 0;

    #pragma omp parallel num_threads(THREADS)
    #pragma omp single
    #pragma omp taskgroup
    {
        /* Only the single thread executes this loop; each iteration
           queues one task that merely sleeps briefly. */
        while (j < LEN) {
            #pragma omp task
            {
                usleep(10);
            }
            ++j;
        }
    }
    return 0;
}
GB_concat_sparse.c
//------------------------------------------------------------------------------ // GB_concat_sparse: concatenate an array of matrices into a sparse matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #define GB_FREE_WORKSPACE \ if (S != NULL) \ { \ for (int64_t k = 0 ; k < m * n ; k++) \ { \ GB_Matrix_free (&(S [k])) ; \ } \ } \ GB_FREE_WORK (&S, S_size) ; \ GB_FREE_WORK (&Work, Work_size) ; \ GB_WERK_POP (A_ek_slicing, int64_t) ; #define GB_FREE_ALL \ { \ GB_FREE_WORKSPACE ; \ GB_phbix_free (C) ; \ } #include "GB_concat.h" GrB_Info GB_concat_sparse // concatenate into a sparse matrix ( GrB_Matrix C, // input/output matrix for results const bool C_iso, // if true, construct C as iso const GB_void *cscalar, // iso value of C, if C is io const int64_t cnz, // # of entries in C const GrB_Matrix *Tiles, // 2D row-major array of size m-by-n, const GrB_Index m, const GrB_Index n, const int64_t *restrict Tile_rows, // size m+1 const int64_t *restrict Tile_cols, // size n+1 GB_Context Context ) { //-------------------------------------------------------------------------- // allocate C as a sparse matrix //-------------------------------------------------------------------------- GrB_Info info ; GrB_Matrix A = NULL ; ASSERT_MATRIX_OK (C, "C input to concat sparse", GB0) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; int64_t *Work = NULL ; size_t Work_size = 0 ; GrB_Matrix *S = NULL ; size_t S_size = 0 ; GrB_Type ctype = C->type ; int64_t cvlen = C->vlen ; int64_t cvdim = C->vdim ; bool csc = C->is_csc ; size_t csize = ctype->size ; GB_Type_code ccode = ctype->code ; float hyper_switch = C->hyper_switch ; float bitmap_switch = C->bitmap_switch ; int sparsity_control = C->sparsity_control ; GB_phbix_free (C) ; // set C->iso = C_iso OK GB_OK 
(GB_new_bix (&C, // existing header ctype, cvlen, cvdim, GB_Ap_malloc, csc, GxB_SPARSE, false, hyper_switch, cvdim, cnz, true, C_iso, Context)) ; C->bitmap_switch = bitmap_switch ; C->sparsity_control = sparsity_control ; int64_t *restrict Cp = C->p ; int64_t *restrict Ci = C->i ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; if (C_iso) { memcpy (C->x, cscalar, csize) ; } //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- int64_t nouter = csc ? n : m ; int64_t ninner = csc ? m : n ; Work = GB_CALLOC_WORK (ninner * cvdim, int64_t, &Work_size) ; S = GB_CALLOC_WORK (m * n, GrB_Matrix, &S_size) ; if (S == NULL || Work == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // count entries in each vector of each tile //-------------------------------------------------------------------------- for (int64_t outer = 0 ; outer < nouter ; outer++) { for (int64_t inner = 0 ; inner < ninner ; inner++) { //------------------------------------------------------------------ // get the tile A; transpose and typecast, if needed //------------------------------------------------------------------ A = csc ? 
GB_TILE (Tiles, inner, outer) : GB_TILE (Tiles, outer, inner) ; GrB_Matrix T = NULL ; ASSERT_MATRIX_OK (A, "A tile for concat sparse", GB0) ; if (csc != A->is_csc) { // T = (ctype) A', not in-place, using a dynamic header GB_OK (GB_new (&T, // auto sparsity, new header A->type, A->vdim, A->vlen, GB_Ap_null, csc, GxB_AUTO_SPARSITY, -1, 1, Context)) ; // save T in array S if (csc) { GB_TILE (S, inner, outer) = T ; } else { GB_TILE (S, outer, inner) = T ; } GB_OK (GB_transpose_cast (T, ctype, csc, A, false, Context)) ; A = T ; GB_MATRIX_WAIT (A) ; ASSERT_MATRIX_OK (A, "T=A' for concat sparse", GB0) ; } ASSERT (C->is_csc == A->is_csc) ; ASSERT (!GB_ANY_PENDING_WORK (A)) ; //------------------------------------------------------------------ // ensure the tile is not bitmap //------------------------------------------------------------------ if (GB_IS_BITMAP (A)) { if (T == NULL) { // copy A into T // set T->iso = A->iso OK: no burble needed GB_OK (GB_dup_worker (&T, A->iso, A, true, NULL, Context)) ; // save T in array S if (csc) { GB_TILE (S, inner, outer) = T ; } else { GB_TILE (S, outer, inner) = T ; } ASSERT_MATRIX_OK (T, "T=dup(A) for concat sparse", GB0) ; } // convert T from bitmap to sparse GB_OK (GB_convert_bitmap_to_sparse (T, Context)) ; ASSERT_MATRIX_OK (T, "T bitmap to sparse, concat sparse", GB0) ; A = T ; } ASSERT (!GB_IS_BITMAP (A)) ; //------------------------------------------------------------------ // log the # of entries in each vector of the tile A //------------------------------------------------------------------ const int64_t anvec = A->nvec ; const int64_t avlen = A->vlen ; int64_t cvstart = csc ? 
Tile_cols [outer] : Tile_rows [outer] ; int64_t *restrict W = Work + inner * cvdim + cvstart ; int nth = GB_nthreads (anvec, chunk, nthreads_max) ; if (GB_IS_FULL (A)) { // A is full int64_t j ; #pragma omp parallel for num_threads(nth) schedule(static) for (j = 0 ; j < anvec ; j++) { // W [j] = # of entries in A(:,j), which is just avlen W [j] = avlen ; } } else { // A is sparse or hyper int64_t k ; int64_t *restrict Ah = A->h ; int64_t *restrict Ap = A->p ; #pragma omp parallel for num_threads(nth) schedule(static) for (k = 0 ; k < anvec ; k++) { // W [j] = # of entries in A(:,j), the kth column of A int64_t j = GBH (Ah, k) ; W [j] = Ap [k+1] - Ap [k] ; } } } } //-------------------------------------------------------------------------- // cumulative sum of entries in each tile //-------------------------------------------------------------------------- int nth = GB_nthreads (ninner*cvdim, chunk, nthreads_max) ; int64_t k ; #pragma omp parallel for num_threads(nth) schedule(static) for (k = 0 ; k < cvdim ; k++) { int64_t s = 0 ; for (int64_t inner = 0 ; inner < ninner ; inner++) { int64_t p = inner * cvdim + k ; int64_t c = Work [p] ; Work [p] = s ; s += c ; } // total number of entries in C(:,k) Cp [k] = s ; } GB_cumsum (Cp, cvdim, &(C->nvec_nonempty), nthreads_max, Context) ; #pragma omp parallel for num_threads(nth) schedule(static) for (k = 0 ; k < cvdim ; k++) { int64_t pC = Cp [k] ; for (int64_t inner = 0 ; inner < ninner ; inner++) { int64_t p = inner * cvdim + k ; Work [p] += pC ; } } //-------------------------------------------------------------------------- // concatenate all matrices into C //-------------------------------------------------------------------------- for (int64_t outer = 0 ; outer < nouter ; outer++) { for (int64_t inner = 0 ; inner < ninner ; inner++) { //------------------------------------------------------------------ // get the tile A, either the temporary matrix T or the original A 
//------------------------------------------------------------------ A = csc ? GB_TILE (S, inner, outer) : GB_TILE (S, outer, inner) ; if (A == NULL) { A = csc ? GB_TILE (Tiles, inner, outer) : GB_TILE (Tiles, outer, inner) ; } ASSERT_MATRIX_OK (A, "A tile again, concat sparse", GB0) ; ASSERT (!GB_IS_BITMAP (A)) ; ASSERT (C->is_csc == A->is_csc) ; ASSERT (!GB_ANY_PENDING_WORK (A)) ; GB_Type_code acode = A->type->code ; //------------------------------------------------------------------ // determine where to place the tile in C //------------------------------------------------------------------ // The tile A appears in vectors cvstart:cvend-1 of C, and indices // cistart:ciend-1. int64_t cvstart, cvend, cistart, ciend ; if (csc) { // C and A are held by column // Tiles is row-major and accessed in column order cvstart = Tile_cols [outer] ; cvend = Tile_cols [outer+1] ; cistart = Tile_rows [inner] ; ciend = Tile_rows [inner+1] ; } else { // C and A are held by row // Tiles is row-major and accessed in row order cvstart = Tile_rows [outer] ; cvend = Tile_rows [outer+1] ; cistart = Tile_cols [inner] ; ciend = Tile_cols [inner+1] ; } // get the workspace pointer array W for this tile int64_t *restrict W = Work + inner * cvdim + cvstart ; //------------------------------------------------------------------ // slice the tile //------------------------------------------------------------------ int64_t avdim = cvend - cvstart ; int64_t avlen = ciend - cistart ; ASSERT (avdim == A->vdim) ; ASSERT (avlen == A->vlen) ; int A_nthreads, A_ntasks ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; const bool A_iso = A->iso ; GB_SLICE_MATRIX (A, 1, chunk) ; //------------------------------------------------------------------ // copy the tile A into C //------------------------------------------------------------------ bool done = false ; if (C_iso) { //-------------------------------------------------------------- // C 
and A are iso //-------------------------------------------------------------- #define GB_ISO_CONCAT #define GB_COPY(pC,pA,A_iso) ; #include "GB_concat_sparse_template.c" } else { #ifndef GBCUDA_DEV if (ccode == acode) { // no typecasting needed switch (csize) { #undef GB_COPY #define GB_COPY(pC,pA,A_iso) \ Cx [pC] = GBX (Ax, pA, A_iso) ; case GB_1BYTE : // uint8, int8, bool, or 1-byte user #define GB_CTYPE uint8_t #include "GB_concat_sparse_template.c" break ; case GB_2BYTE : // uint16, int16, or 2-byte user #define GB_CTYPE uint16_t #include "GB_concat_sparse_template.c" break ; case GB_4BYTE : // uint32, int32, float, or 4-byte user #define GB_CTYPE uint32_t #include "GB_concat_sparse_template.c" break ; case GB_8BYTE : // uint64, int64, double, float complex, // or 8-byte user defined #define GB_CTYPE uint64_t #include "GB_concat_sparse_template.c" break ; case GB_16BYTE : // double complex or 16-byte user #define GB_CTYPE GB_blob16 #include "GB_concat_sparse_template.c" break ; default:; } } #endif } if (!done) { // with typecasting or user-defined types GB_cast_function cast_A_to_C = GB_cast_factory (ccode, acode) ; size_t asize = A->type->size ; #define GB_CTYPE GB_void #undef GB_COPY #define GB_COPY(pC,pA,A_iso) \ cast_A_to_C (Cx + (pC)*csize, \ Ax + (A_iso ? 0:(pA)*asize), asize) ; #include "GB_concat_sparse_template.c" } GB_WERK_POP (A_ek_slicing, int64_t) ; } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; C->magic = GB_MAGIC ; ASSERT_MATRIX_OK (C, "C from concat sparse", GB0) ; return (GrB_SUCCESS) ; }
GB_unop__identity_uint64_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_int64)
// op(A') function:  GB (_unop_tran__identity_uint64_int64)

// C type:   uint64_t
// A type:   int64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// A's entry type (the input of the operator)
#define GB_ATYPE \
    int64_t

// C's entry type (the output of the operator)
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply: Cx [p] = (uint64_t) Ax [p] for all p in 0..anz-1,
// parallelized over nthreads OpenMP threads.  When Ab is non-NULL the
// matrix is bitmap and only positions with Ab [p] != 0 are written.
GrB_Info GB (_unop_apply__identity_uint64_int64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap positions)
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to
    // the generic (non-hard-coded) kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions with no entry present
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which expands
// using the GB_* macros defined above for this type pair.
GrB_Info GB (_unop_tran__identity_uint64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     // per-workspace buffers for the transpose
    const int64_t *restrict A_slice,   // how A's entries are sliced across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
opencl_keychain_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_keychain; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_keychain); #else #include <string.h> #include <openssl/des.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "misc.h" #include "options.h" #include "common-opencl.h" #define FORMAT_LABEL "keychain-opencl" #define FORMAT_NAME "Mac OS X Keychain" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define SWAP(n) \ (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24)) #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(*salt_struct) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN MEM_ALIGN_WORD #define SALTLEN 20 #define IVLEN 8 #define CTLEN 48 #define uint8_t unsigned char #define uint16_t unsigned short #define uint32_t ARCH_WORD_32 #define OCL_CONFIG "keychain" typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } keychain_password; typedef struct { uint32_t v[32/4]; } keychain_hash; typedef struct { uint8_t length; uint8_t salt[SALTLEN]; uint32_t iterations; uint32_t outlen; } keychain_salt; static int *cracked; static int any_cracked; static struct fmt_tests keychain_tests[] = { {"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"}, // these were generated with pass_gen.pl. 
NOTE, they ALL have the data (which gets encrypted) which was decrypted from the above hash. {"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"}, {"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"}, {"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"}, {"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"}, {NULL} }; static struct custom_salt { unsigned char salt[SALTLEN]; unsigned char iv[IVLEN]; unsigned char ct[CTLEN]; } *salt_struct; static cl_int cl_error; static keychain_password *inbuffer; static keychain_hash *outbuffer; static keychain_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define OCL_CONFIG "keychain" #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return get_platform_vendor_id(platform_id) == DEV_INTEL ? 
8 : 1; else return 64; } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(keychain_password) * gws; outsize = sizeof(keychain_hash) * gws; settingsize = sizeof(keychain_salt); cracked_size = sizeof(*cracked) * gws; inbuffer = mem_calloc(insize); outbuffer = mem_alloc(outsize); cracked = mem_calloc(cracked_size); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(cracked); } static void done(void) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); } static void init(struct fmt_main *self) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", PLAINTEXT_LENGTH, (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], 
"derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(keychain_password), 0); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 1000); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; if (strncmp(ciphertext, "$keychain$*", 11) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 11; if ((p = strtok(ctcopy, "*")) == NULL) /* salt */ goto err; if(strlen(p) != SALTLEN * 2) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv */ goto err; if(strlen(p) != IVLEN * 2) goto err; if ((p = strtok(NULL, "*")) == NULL) /* ciphertext */ goto err; if(strlen(p) != CTLEN * 2) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; salt_struct = mem_calloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD); ctcopy += 11; /* skip over "$keychain$*" */ p = strtok(ctcopy, "*"); for (i = 0; i < SALTLEN; i++) salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); for (i = 0; i < IVLEN; i++) salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); for (i = 0; i < CTLEN; i++) salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)salt_struct; } static void set_salt(void *salt) { salt_struct = (struct custom_salt *)salt; memcpy((char*)currentsalt.salt, salt_struct->salt, 20); currentsalt.length = 20; currentsalt.iterations = 1000; currentsalt.outlen = 24; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } #undef set_key static void 
set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data) { unsigned char out[CTLEN]; int pad, n, i; DES_cblock key1, key2, key3; DES_cblock ivec; DES_key_schedule ks1, ks2, ks3; memset(out, 0, sizeof(out)); memcpy(key1, key, 8); memcpy(key2, key + 8, 8); memcpy(key3, key + 16, 8); DES_set_key((C_Block *) key1, &ks1); DES_set_key((C_Block *) key2, &ks2); DES_set_key((C_Block *) key3, &ks3); memcpy(ivec, iv, 8); DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT); // now check padding pad = out[47]; if(pad > 8) // "Bad padding byte. You probably have a wrong password" return -1; if(pad != 4) /* possible bug here, is this assumption always valid? */ return -1; n = CTLEN - pad; for(i = n; i < CTLEN; i++) if(out[i] != pad) // "Bad padding. 
You probably have a wrong password" return -1; return 0; } #if 0 //#ifdef DEBUG static void print_hex(unsigned char *str, int len) { int i; for (i = 0; i < len; ++i) printf("%02x", str[i]); printf("\n"); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index; global_work_size = (count + local_work_size - 1) / local_work_size * local_work_size; if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } /// Copy data to gpu HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) if (!kcdecrypt((unsigned char*)outbuffer[index].v, salt_struct->iv, salt_struct->ct)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_opencl_keychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif keychain_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, 
fmt_default_salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */